From acc659a3a3986d830af1fd61d64a4a11eb7f7c3b Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Wed, 2 Mar 2016 21:44:28 -0800 Subject: [PATCH 001/702] ENH: Add writer for Siemens CSA header Allows us to take a parsed CSA header and convert it back into a string. Useful for things like DICOM anonymization, or perhaps round tripping DICOM -> Nifti -> DICOM. --- nibabel/nicom/csareader.py | 110 ++++++++++++++++++++++++++ nibabel/nicom/tests/test_csareader.py | 11 +++ 2 files changed, 121 insertions(+) diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 1764e2878c..b2b87b866f 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -2,6 +2,7 @@ ''' import numpy as np +import struct from .structreader import Unpacker from .utils import find_private_section @@ -29,6 +30,10 @@ class CSAReadError(CSAError): pass +class CSAWriteError(CSAError): + pass + + def get_csa_header(dcm_data, csa_type='image'): ''' Get CSA header information from DICOM header @@ -162,6 +167,96 @@ def read(csa_str): return csa_dict +def write(csa_header): + ''' Write string from CSA header `csa_header` + + Parameters + ---------- + csa_header : dict + header information as dict, where `header` has fields (at least) + ``type, n_tags, tags``. ``header['tags']`` is also a dictionary + with one key, value pair for each tag in the header. + + Returns + ------- + csa_str : str + byte string containing CSA header information + ''' + result = [] + if csa_header['type'] == 2: + result.append(b'SV10') + result.append(csa_header['unused0']) + if not 0 < csa_header['n_tags'] <= 128: + raise CSAWriteError('Number of tags `t` should be ' + '0 < t <= 128') + result.append(struct.pack('2I', + csa_header['n_tags'], + csa_header['check']) + ) + + # Build list of tags in correct order + tags = list(csa_header['tags'].items()) + tags.sort(key=lambda x: x[1]['tag_no']) + tag0_n_items = tags[0][1]['n_items'] + + # Add the information for each tag + for tag_name, tag_dict in tags: + vm = tag_dict['vm'] + vr = tag_dict['vr'] + n_items = tag_dict['n_items'] + assert n_items < 100 + result.append(struct.pack('64si4s3i', + make_nt_str(tag_name), + vm, + make_nt_str(vr), + tag_dict['syngodt'], + n_items, + tag_dict['last3']) + ) + + # Figure out the number of values for this tag + if vm == 0: + n_values = n_items + else: + n_values = vm + + # Add each item for this tag + for item_no in range(n_items): + # Figure out the item length + if item_no >= n_values or tag_dict['items'][item_no] == '': + item_len = 0 + else: + item = tag_dict['items'][item_no] + if not isinstance(item, str): + item = str(item) + item_nt_str = make_nt_str(item) + item_len = len(item_nt_str) + + # These values aren't actually preserved in the dict + # representation of the header. Best we can do is set the ones + # that determine the item length appropriately. 
+ x0, x1, x2, x3 = 0, 0, 0, 0 + if csa_header['type'] == 1: # CSA1 - odd length calculation + x0 = tag0_n_items + item_len + if item_len < 0 or (ptr + item_len) > csa_len: + if item_no < vm: + items.append('') + break + else: # CSA2 + x1 = item_len + result.append(struct.pack('4i', x0, x1, x2, x3)) + + if item_len == 0: + continue + + result.append(item_nt_str) + # go to 4 byte boundary + plus4 = item_len % 4 + if plus4 != 0: + result.append(b'\x00' * (4 - plus4)) + return b''.join(result) + + def get_scalar(csa_dict, tag_name): try: items = csa_dict['tags'][tag_name]['items'] @@ -259,3 +354,18 @@ def nt_str(s): if zero_pos == -1: return s return s[:zero_pos].decode('latin-1') + + +def make_nt_str(s): + ''' Create a null terminated byte string from a unicode object. + + Parameters + ---------- + s : unicode + + Returns + ------- + result : bytes + s encoded as latin-1 with a null char appended + ''' + return s.encode('latin-1') + b'\x00' diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 1692aad622..ba644a09ff 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -136,3 +136,14 @@ def test_missing_csa_elem(): del dcm[csa_tag] hdr = csa.get_csa_header(dcm, 'image') assert hdr is None + + +def test_read_write_rt(): + # Try doing a read-write-read round trip and make sure the dictionary + # representation of the header is the same. We can't exactly reproduce the + # original string representation currently. + for csa_str in (CSA2_B0, CSA2_B1000): + csa_info = csa.read(csa_str) + new_csa_str = csa.write(csa_info) + new_csa_info = csa.read(new_csa_str) + assert csa_info == new_csa_info From 122a923dfb5b55f22487d4c3f072391f1dcc2afd Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Mon, 23 Mar 2020 20:01:44 -0700 Subject: [PATCH 002/702] CLN: Cleanup whitespace and formatting --- nibabel/nicom/csareader.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index b2b87b866f..98d06557f2 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -188,11 +188,11 @@ def write(csa_header): result.append(csa_header['unused0']) if not 0 < csa_header['n_tags'] <= 128: raise CSAWriteError('Number of tags `t` should be ' - '0 < t <= 128') + '0 < t <= 128') result.append(struct.pack('2I', csa_header['n_tags'], csa_header['check']) - ) + ) # Build list of tags in correct order tags = list(csa_header['tags'].items()) @@ -212,7 +212,7 @@ def write(csa_header): tag_dict['syngodt'], n_items, tag_dict['last3']) - ) + ) # Figure out the number of values for this tag if vm == 0: @@ -242,7 +242,7 @@ def write(csa_header): if item_no < vm: items.append('') break - else: # CSA2 + else: # CSA2 x1 = item_len result.append(struct.pack('4i', x0, x1, x2, x3)) From 9785ea9429fc4803ac5029cbfbd20f19eecef6b4 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 00:08:17 +0100 Subject: [PATCH 003/702] REL: 4.0.0 --- Changelog | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Changelog b/Changelog index bbb39a463d..d3ead60db3 100644 --- a/Changelog +++ b/Changelog @@ -25,8 +25,8 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. -4.0.0 (To be determined) -======================== +4.0.0 (Saturday 18 June 2022) +============================= New feature release in the 4.0.x series. 
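

For context, the round trip enabled by the CSA writer introduced in PATCH
001/702 above looks roughly like this (a minimal sketch, assuming ``dcm`` is a
``pydicom`` dataset read from a Siemens acquisition; the filename is purely
illustrative):

    import pydicom
    from nibabel.nicom import csareader as csa

    dcm = pydicom.dcmread('siemens_dwi.dcm')    # illustrative input file
    # Parse the Siemens CSA image header into its dict representation
    csa_dict = csa.get_csa_header(dcm, 'image')
    # Serialize the dict back to a byte string with the new writer
    csa_bytes = csa.write(csa_dict)
    # As test_read_write_rt checks, the dict representation survives the
    # round trip, even though the exact byte string may not be reproduced
    assert csa.read(csa_bytes) == csa_dict
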
From e042875441883068efecad703e6111378af5e3c2 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 00:13:09 +0100 Subject: [PATCH 004/702] MNT: Set fallback version to 4.0.1.dev0 --- nibabel/info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/info.py b/nibabel/info.py index 7d9278f1f9..2d10a7300a 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -12,8 +12,8 @@ # development (pre-release) version. _version_major = 4 _version_minor = 0 -_version_micro = 0 -_version_extra = 'rc1.dev0' +_version_micro = 1 +_version_extra = '.dev0' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" VERSION = f"{_version_major}.{_version_minor}.{_version_micro}{_version_extra}" From 99602406e841809d15d4d590e15214bd31ec05b4 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 00:14:25 +0100 Subject: [PATCH 005/702] MNT: Set fallback version to 4.1.0.dev0 --- nibabel/info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/info.py b/nibabel/info.py index 7d9278f1f9..ad5d473f74 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -11,9 +11,9 @@ # This should be set to the intended next version + dev to indicate a # development (pre-release) version. _version_major = 4 -_version_minor = 0 +_version_minor = 1 _version_micro = 0 -_version_extra = 'rc1.dev0' +_version_extra = '.dev0' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" VERSION = f"{_version_major}.{_version_minor}.{_version_micro}{_version_extra}" From 84c60034f9afdffdb1d3ef7c25953377f077e85f Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 00:59:06 +0100 Subject: [PATCH 006/702] TEST: Convert tests for DeprecationWarnings to ExpiredDeprecationErrors --- nibabel/freesurfer/tests/test_mghformat.py | 42 ++---------- nibabel/gifti/tests/test_gifti.py | 68 ++++++------------- nibabel/gifti/tests/test_giftiio.py | 9 ++- nibabel/gifti/tests/test_parse_gifti_fast.py | 48 +++++-------- nibabel/nicom/tests/test_dicomwrappers.py | 5 +- .../streamlines/tests/test_array_sequence.py | 3 +- nibabel/tests/test_ecat.py | 17 ++--- nibabel/tests/test_image_api.py | 14 ++-- nibabel/tests/test_image_load_save.py | 31 ++------- nibabel/tests/test_imageclasses.py | 15 ++-- nibabel/tests/test_openers.py | 7 +- nibabel/tests/test_orientations.py | 7 +- nibabel/tests/test_parrec.py | 3 +- nibabel/tests/test_spatialimages.py | 9 +-- nibabel/tests/test_volumeutils.py | 51 +------------- 15 files changed, 86 insertions(+), 243 deletions(-) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 3ff9f9114c..9c75d06208 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -22,6 +22,7 @@ from ...volumeutils import sys_is_le from ...wrapstruct import WrapStructError from ... 
import imageglobals +from ...deprecator import ExpiredDeprecationError import pytest @@ -344,42 +345,13 @@ def test_deprecated_fields(): hdr_data = MGHHeader._HeaderData(hdr.structarr) # mrparams is the only deprecated field at the moment - # Accessing hdr_data is equivalent to accessing hdr, so double all checks - with pytest.deprecated_call(match="from version: 2.3"): - assert_array_equal(hdr['mrparams'], 0) - assert_array_equal(hdr_data['mrparams'], 0) - - with pytest.deprecated_call(match="from version: 2.3"): + # Accessing hdr_data is equivalent to accessing hdr, so double all checks, + # but expect success on hdr_data['mrparams'] + with pytest.raises(ExpiredDeprecationError): + hdr['mrparams'] + with pytest.raises(ExpiredDeprecationError): hdr['mrparams'] = [1, 2, 3, 4] - with pytest.deprecated_call(match="from version: 2.3"): - assert_array_almost_equal(hdr['mrparams'], [1, 2, 3, 4]) - assert hdr['tr'] == 1 - assert hdr['flip_angle'] == 2 - assert hdr['te'] == 3 - assert hdr['ti'] == 4 - assert hdr['fov'] == 0 - assert_array_almost_equal(hdr_data['mrparams'], [1, 2, 3, 4]) - assert hdr_data['tr'] == 1 - assert hdr_data['flip_angle'] == 2 - assert hdr_data['te'] == 3 - assert hdr_data['ti'] == 4 - assert hdr_data['fov'] == 0 - - hdr['tr'] = 5 - hdr['flip_angle'] = 6 - hdr['te'] = 7 - hdr['ti'] = 8 - with pytest.deprecated_call(match="from version: 2.3"): - assert_array_almost_equal(hdr['mrparams'], [5, 6, 7, 8]) - assert_array_almost_equal(hdr_data['mrparams'], [5, 6, 7, 8]) - - hdr_data['tr'] = 9 - hdr_data['flip_angle'] = 10 - hdr_data['te'] = 11 - hdr_data['ti'] = 12 - with pytest.deprecated_call(match="from version: 2.3"): - assert_array_almost_equal(hdr['mrparams'], [9, 10, 11, 12]) - assert_array_almost_equal(hdr_data['mrparams'], [9, 10, 11, 12]) + assert_array_equal(hdr_data['mrparams'], 0) class TestMGHImage(tsi.TestSpatialImage, tsi.MmapImageMixin): diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 4363c2cd0a..82cc8e25de 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -15,10 +15,11 @@ from ..gifti import data_tag from ...nifti1 import data_type_codes from ...fileholders import FileHolder +from ...deprecator import ExpiredDeprecationError from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest -from ...testing import clear_and_catch_warnings, test_data +from ...testing import test_data from .test_parse_gifti_fast import (DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6) import itertools @@ -183,46 +184,29 @@ def test_dataarray_init(): def test_dataarray_from_array(): - with clear_and_catch_warnings() as w: - warnings.filterwarnings('always', category=DeprecationWarning) - da = GiftiDataArray.from_array(np.ones((3, 4))) - assert len(w) == 1 - for dt_code in data_type_codes.value_set(): - data_type = data_type_codes.type[dt_code] - if data_type is np.void: # not supported - continue - arr = np.zeros((10, 3), dtype=data_type) - da = GiftiDataArray.from_array(arr, 'triangle') - assert da.datatype == data_type_codes[arr.dtype] - bs_arr = arr.byteswap().newbyteorder() - da = GiftiDataArray.from_array(bs_arr, 'triangle') - assert da.datatype == data_type_codes[arr.dtype] + with pytest.raises(ExpiredDeprecationError): + GiftiDataArray.from_array(np.ones((3, 4))) def test_to_xml_open_close_deprecations(): # Smoke test on deprecated functions da = GiftiDataArray(np.ones((1,)), 'triangle') - with clear_and_catch_warnings() as w: - 
warnings.filterwarnings('always', category=DeprecationWarning) - assert isinstance(da.to_xml_open(), str) - assert len(w) == 1 - with clear_and_catch_warnings() as w: - warnings.filterwarnings('once', category=DeprecationWarning) - assert isinstance(da.to_xml_close(), str) - assert len(w) == 1 + with pytest.raises(ExpiredDeprecationError): + da.to_xml_open() + with pytest.raises(ExpiredDeprecationError): + da.to_xml_close() def test_num_dim_deprecation(): da = GiftiDataArray(np.ones((2, 3, 4))) # num_dim is property, set automatically from len(da.dims) assert da.num_dim == 3 - with clear_and_catch_warnings() as w: - warnings.filterwarnings('always', category=DeprecationWarning) - # OK setting num_dim to correct value, but raises DeprecationWarning + # setting num_dim to correct value is deprecated + with pytest.raises(ExpiredDeprecationError): da.num_dim = 3 - assert len(w) == 1 - # Any other value gives a ValueError - pytest.raises(ValueError, setattr, da, 'num_dim', 4) + # setting num_dim to incorrect value is also deprecated + with pytest.raises(ExpiredDeprecationError): + da.num_dim = 4 def test_labeltable(): @@ -235,14 +219,10 @@ def test_labeltable(): assert len(img.labeltable.labels) == 2 # Test deprecations - with clear_and_catch_warnings() as w: - warnings.filterwarnings('always', category=DeprecationWarning) + with pytest.raises(ExpiredDeprecationError): newer_table = GiftiLabelTable() newer_table.labels += ['test', 'me', 'again'] img.set_labeltable(newer_table) - assert len(w) == 1 - assert len(img.get_labeltable().labels) == 3 - assert len(w) == 2 def test_metadata(): @@ -261,14 +241,8 @@ def test_metadata(): assert md.data[0].value == 'value' assert len(w) == 2 # Test deprecation - with clear_and_catch_warnings() as w: - warnings.filterwarnings('always', category=DeprecationWarning) - assert md.get_metadata() == dict(key='value') - assert len(w) == 1 - assert md.metadata == dict(key='value') - assert len(w) == 2 - assert len(GiftiDataArray().get_metadata()) == 0 - assert len(w) == 3 + with pytest.raises(ExpiredDeprecationError): + md.get_metadata() def test_gifti_label_rgba(): @@ -295,10 +269,8 @@ def assign_rgba(gl, val): pytest.raises(ValueError, assign_rgba, gl3, rgba.tolist() + rgba.tolist()) # Test deprecation - with clear_and_catch_warnings() as w: - warnings.filterwarnings('once', category=DeprecationWarning) - assert kwargs['red'] == gl3.get_rgba()[0] - assert len(w) == 1 + with pytest.raises(ExpiredDeprecationError): + gl3.get_rgba() # Test default value gl4 = GiftiLabel() @@ -325,10 +297,8 @@ def test_gifti_coord(): def test_data_tag_deprecated(): - with clear_and_catch_warnings() as w: - warnings.filterwarnings('once', category=DeprecationWarning) + with pytest.raises(ExpiredDeprecationError): data_tag(np.array([]), 'ASCII', '%i', 1) - assert len(w) == 1 def test_gifti_round_trip(): diff --git a/nibabel/gifti/tests/test_giftiio.py b/nibabel/gifti/tests/test_giftiio.py index 8269618b0c..f2e2458120 100644 --- a/nibabel/gifti/tests/test_giftiio.py +++ b/nibabel/gifti/tests/test_giftiio.py @@ -7,16 +7,19 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +from ..gifti import GiftiImage from ..giftiio import read, write from .test_parse_gifti_fast import DATA_FILE1 +from ...deprecator import ExpiredDeprecationError import pytest def test_read_deprecated(tmp_path): - with pytest.deprecated_call(): - img = read(DATA_FILE1) + with pytest.raises(ExpiredDeprecationError): + read(DATA_FILE1) + img = GiftiImage.from_filename(DATA_FILE1) fname = 
tmp_path / 'test.gii' - with pytest.deprecated_call(): + with pytest.raises(ExpiredDeprecationError): write(img, fname) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index c1207d41fb..b7ca2b7f4e 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -22,6 +22,7 @@ from ...loadsave import load, save from ...nifti1 import xform_codes from ...tmpdirs import InTemporaryDirectory +from ...deprecator import ExpiredDeprecationError from numpy.testing import assert_array_almost_equal @@ -48,8 +49,8 @@ DATA_FILE1_darr1 = np.array( [[-16.07201, -66.187515, 21.266994], - [-16.705893, -66.054337, 21.232786], - [-17.614349, -65.401642, 21.071466]]) + [-16.705893, -66.054337, 21.232786], + [-17.614349, -65.401642, 21.071466]]) DATA_FILE1_darr2 = np.array([0, 1, 2]) DATA_FILE2_darr1 = np.array([[0.43635699], @@ -189,14 +190,11 @@ def test_metadata_deprecations(): me = img.meta # Test deprecation - with clear_and_catch_warnings() as w: - warnings.filterwarnings('once', category=DeprecationWarning) - assert me == img.get_meta() + with pytest.raises(ExpiredDeprecationError): + img.get_meta() - with clear_and_catch_warnings() as w: - warnings.filterwarnings('once', category=DeprecationWarning) + with pytest.raises(ExpiredDeprecationError): img.set_metadata(me) - assert me == img.meta def test_load_dataarray1(): @@ -321,12 +319,8 @@ def test_load_getbyintent(): da = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET") assert len(da) == 1 - with clear_and_catch_warnings() as w: - warnings.filterwarnings('once', category=DeprecationWarning) - da = img.getArraysFromIntent("NIFTI_INTENT_POINTSET") - assert len(da) == 1 - assert len(w) == 1 - w[0].category == DeprecationWarning + with pytest.raises(ExpiredDeprecationError): + img.getArraysFromIntent("NIFTI_INTENT_POINTSET") da = img.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE") assert len(da) == 1 @@ -360,16 +354,11 @@ def test_labeltable_deprecations(): lt = img.labeltable # Test deprecation - with clear_and_catch_warnings() as w: - warnings.filterwarnings('always', category=DeprecationWarning) - assert lt == img.get_labeltable() - assert len(w) == 1 + with pytest.raises(ExpiredDeprecationError): + img.get_labeltable() - with clear_and_catch_warnings() as w: - warnings.filterwarnings('always', category=DeprecationWarning) + with pytest.raises(ExpiredDeprecationError): img.set_labeltable(lt) - assert len(w) == 1 - assert lt == img.labeltable def test_parse_dataarrays(): @@ -395,16 +384,11 @@ def test_parse_dataarrays(): def test_parse_deprecated(): # Test deprecation - with clear_and_catch_warnings() as w: - warnings.filterwarnings('always', category=DeprecationWarning) - op = Outputter() - assert len(w) == 1 - op.initialize() # smoke test--no error. - - with clear_and_catch_warnings() as w: - warnings.filterwarnings('always', category=DeprecationWarning) - pytest.raises(ValueError, parse_gifti_file) - assert len(w) == 1 + with pytest.raises(ExpiredDeprecationError): + Outputter() + + with pytest.raises(ExpiredDeprecationError): + parse_gifti_file() def test_parse_with_buffersize(): diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index f5d1ba366d..11b5b482b9 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -13,6 +13,7 @@ from .. import dicomwrappers as didw from .. 
import dicomreaders as didr from ...volumeutils import endian_codes +from ...deprecator import ExpiredDeprecationError import pytest from unittest import TestCase @@ -631,8 +632,8 @@ def test_affine(self): # Make sure we find orientation/position/spacing info dw = didw.wrapper_from_file(DATA_FILE_4D) aff = dw.affine - with pytest.deprecated_call(): - assert np.array_equal(dw.get_affine(), aff) + with pytest.raises(ExpiredDeprecationError): + dw.get_affine() @dicom_test def test_data_real(self): diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index 53ad364b46..4a5c21aa2e 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -10,6 +10,7 @@ from numpy.testing import assert_array_equal from ..array_sequence import ArraySequence, is_array_sequence, concatenate +from ...deprecator import ExpiredDeprecationError SEQ_DATA = {} @@ -96,7 +97,7 @@ def test_creating_arraysequence_from_list(self): def test_deprecated_data_attribute(self): seq = ArraySequence(SEQ_DATA['data']) - with pytest.deprecated_call(match="from version: 3.0"): + with pytest.raises(ExpiredDeprecationError): seq.data def test_creating_arraysequence_from_generator(self): diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index f0d50480d8..607345e473 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -23,6 +23,7 @@ from ..testing import data_path, suppress_warnings from ..tmpdirs import InTemporaryDirectory +from ..deprecator import ExpiredDeprecationError from . import test_wrapstruct as tws from .test_fileslice import slicer_samples @@ -240,9 +241,8 @@ def test_isolation(self): assert not np.all(img.affine == aff) def test_get_affine_deprecated(self): - with pytest.deprecated_call(match="from version: 2.1"): - aff = self.img.get_affine() - assert np.array_equal(aff, self.img.affine) + with pytest.raises(ExpiredDeprecationError): + self.img.get_affine() def test_float_affine(self): # Check affines get converted to float @@ -275,12 +275,5 @@ def test_mlist_regression(self): def test_from_filespec_deprecation(): - # Check from_filespec raises Deprecation - with pytest.deprecated_call() as w: - # No warning for standard load - img_loaded = EcatImage.load(ecat_file) - assert len(w) == 0 - # Warning for from_filespec - img_speced = EcatImage.from_filespec(ecat_file) - assert len(w) == 1 - assert_array_equal(img_loaded.get_fdata(), img_speced.get_fdata()) + with pytest.raises(ExpiredDeprecationError): + EcatImage.from_filespec(ecat_file) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 0519af071d..afc04d709a 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -40,6 +40,7 @@ MGHImage, Minc1Image, Minc2Image, is_proxy) from ..spatialimages import SpatialImage from .. 
import minc1, minc2, parrec, brikhead +from ..deprecator import ExpiredDeprecationError import unittest import pytest @@ -120,9 +121,8 @@ def validate_header(self, imaker, params): def validate_header_deprecated(self, imaker, params): # Check deprecated header API img = imaker() - with pytest.deprecated_call(): + with pytest.raises(ExpiredDeprecationError): hdr = img.get_header() - assert hdr is img.header def validate_filenames(self, imaker, params): # Validate the filename, file_map interface @@ -420,7 +420,7 @@ def _check_array_caching(self, imaker, meth_name, caching): def validate_data_deprecated(self, imaker, params): # Check _data property still exists, but raises warning img = imaker() - with pytest.deprecated_call(): + with pytest.raises(ExpiredDeprecationError): assert_data_similar(img._data, params) # Check setting _data raises error fake_data = np.zeros(img.shape).astype(img.get_data_dtype()) @@ -519,12 +519,8 @@ def validate_affine(self, imaker, params): def validate_affine_deprecated(self, imaker, params): # Check deprecated affine API img = imaker() - with pytest.deprecated_call(): - assert_almost_equal(img.get_affine(), params['affine'], 6) - assert img.get_affine().dtype == np.float64 - aff = img.get_affine() - aff[0, 0] = 1.5 - assert aff is img.get_affine() + with pytest.raises(ExpiredDeprecationError): + img.get_affine() class SerializeMixin(object): diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 1535d5838f..07c8bf8c5d 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -29,6 +29,7 @@ from ..volumeutils import native_code, swapped_code from ..optpkg import optional_package from ..spatialimages import SpatialImage +from ..deprecator import ExpiredDeprecationError from numpy.testing import assert_array_equal, assert_array_almost_equal import pytest @@ -273,33 +274,9 @@ def test_filename_save(): def test_analyze_detection(): # Test detection of Analyze, Nifti1 and Nifti2 # Algorithm is as described in loadsave:which_analyze_type - def wat(hdr): - with pytest.deprecated_call(): - return nils.which_analyze_type(hdr.binaryblock) - n1_hdr = Nifti1Header(b'\0' * 348, check=False) - assert wat(n1_hdr) is None - n1_hdr['sizeof_hdr'] = 540 - assert wat(n1_hdr) == 'nifti2' - assert wat(n1_hdr.as_byteswapped()) == 'nifti2' - n1_hdr['sizeof_hdr'] = 348 - assert wat(n1_hdr) == 'analyze' - assert wat(n1_hdr.as_byteswapped()) == 'analyze' - n1_hdr['magic'] = b'n+1' - assert wat(n1_hdr) == 'nifti1' - assert wat(n1_hdr.as_byteswapped()) == 'nifti1' - n1_hdr['magic'] = b'ni1' - assert wat(n1_hdr) == 'nifti1' - assert wat(n1_hdr.as_byteswapped()) == 'nifti1' - # Doesn't matter what magic is if it's not a nifti1 magic - n1_hdr['magic'] = b'ni2' - assert wat(n1_hdr) == 'analyze' - n1_hdr['sizeof_hdr'] = 0 - n1_hdr['magic'] = b'' - assert wat(n1_hdr) is None - n1_hdr['magic'] = 'n+1' - assert wat(n1_hdr) == 'nifti1' - n1_hdr['magic'] = 'ni1' - assert wat(n1_hdr) == 'nifti1' + hdr = Nifti1Header(b'\0' * 348, check=False) + with pytest.raises(ExpiredDeprecationError): + nils.which_analyze_type(hdr.binaryblock) def test_guessed_image_type(): diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index fd61fef36f..43096e4347 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -6,6 +6,8 @@ import numpy as np +import pytest + import nibabel as nib from nibabel.analyze import AnalyzeImage from nibabel.nifti1 import Nifti1Image @@ -15,8 
+17,7 @@ from nibabel.imageclasses import spatial_axes_first, class_map, ext_map from nibabel.optpkg import optional_package -from nibabel.testing import clear_and_catch_warnings - +from nibabel.deprecator import ExpiredDeprecationError have_h5py = optional_package('h5py')[1] @@ -51,12 +52,8 @@ def test_spatial_axes_first(): def test_deprecations(): - with clear_and_catch_warnings(modules=[imageclasses]) as w: - warnings.filterwarnings('always', category=DeprecationWarning) - nifti_single = class_map['nifti_single'] - assert nifti_single['class'] == Nifti1Image - assert len(w) == 1 + with pytest.raises(ExpiredDeprecationError): + class_map['nifti_single'] + with pytest.raises(ExpiredDeprecationError): nifti_ext = ext_map['.nii'] - assert nifti_ext == 'nifti_single' - assert len(w) == 2 diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 9c8f53a5b6..541af368c5 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -29,7 +29,7 @@ import unittest from unittest import mock import pytest -from ..testing import error_warnings +from ..deprecator import ExpiredDeprecationError pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") @@ -115,9 +115,8 @@ def test_Opener_various(): def test_BinOpener(): - with error_warnings(): - with pytest.raises(DeprecationWarning): - BinOpener('test.txt', 'r') + with pytest.raises(ExpiredDeprecationError): + BinOpener('test.txt', 'r') class MockIndexedGzipFile(GzipFile): diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 24a22a086b..22d899c4dc 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -21,6 +21,7 @@ orientation_affine) from ..affines import from_matvec, to_matvec +from ..deprecator import ExpiredDeprecationError IN_ARRS = [np.eye(4), @@ -355,10 +356,8 @@ def test_inv_ornt_aff(): def test_orientation_affine_deprecation(): - aff1 = inv_ornt_aff([[0, 1], [1, -1], [2, 1]], (3, 4, 5)) - with pytest.deprecated_call(): - aff2 = orientation_affine([[0, 1], [1, -1], [2, 1]], (3, 4, 5)) - assert_array_equal(aff1, aff2) + with pytest.raises(ExpiredDeprecationError): + orientation_affine([[0, 1], [1, -1], [2, 1]], (3, 4, 5)) def test_flip_axis_deprecation(): diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 92e3dc5a29..9a8f2b1dfc 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -23,6 +23,7 @@ import pytest from ..testing import (clear_and_catch_warnings, suppress_warnings, assert_arr_dict_equal) +from ..deprecator import ExpiredDeprecationError from .test_arrayproxy import check_mmap from . 
import test_spatialimages as tsi @@ -263,7 +264,7 @@ def test_affine_regression(): def test_get_voxel_size_deprecated(): hdr = PARRECHeader(HDR_INFO, HDR_DEFS) - with pytest.deprecated_call(): + with pytest.raises(ExpiredDeprecationError): hdr.get_voxel_size() diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 283ad0d3b4..dd707aa242 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -608,10 +608,7 @@ def test_load_mmap(self): def test_header_deprecated(): - with pytest.deprecated_call() as w: - class MyHeader(Header): - pass - - assert len(w) == 0 + class MyHeader(Header): + pass + with pytest.raises(ExpiredDeprecationError): MyHeader() - assert len(w) == 1 diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 2fe57a232c..b58cbeb60a 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -1040,58 +1040,11 @@ def test_fname_ext_ul_case(): assert fname_ext_ul_case('afile.TxT') == 'afile.TxT' -def test_allopen(): +def test_allopen_deprecated(): # This import into volumeutils is for compatibility. The code is the # ``openers`` module. - with pytest.deprecated_call() as w: - # Test default mode is 'rb' + with pytest.raises(ExpiredDeprecationError): fobj = allopen(__file__) - # Check we got the deprecation warning - assert len(w) == 1 - assert fobj.mode == 'rb' - # That we can set it - fobj = allopen(__file__, 'r') - assert fobj.mode == 'r' - # with keyword arguments - fobj = allopen(__file__, mode='r') - assert fobj.mode == 'r' - # fileobj returns fileobj - msg = b'tiddle pom' - sobj = BytesIO(msg) - fobj = allopen(sobj) - assert fobj.read() == msg - # mode is gently ignored - fobj = allopen(sobj, mode='r') - - -def test_allopen_compresslevel(): - # We can set the default compression level with the module global - # Get some data to compress - with open(__file__, 'rb') as fobj: - my_self = fobj.read() - # Prepare loop - fname = 'test.gz' - sizes = {} - # Stash module global - from .. import volumeutils as vu - original_compress_level = vu.default_compresslevel - assert original_compress_level == 1 - try: - with InTemporaryDirectory(): - for compresslevel in ('default', 1, 9): - if compresslevel != 'default': - vu.default_compresslevel = compresslevel - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - with allopen(fname, 'wb') as fobj: - fobj.write(my_self) - with open(fname, 'rb') as fobj: - my_selves_smaller = fobj.read() - sizes[compresslevel] = len(my_selves_smaller) - assert sizes['default'] == sizes[1] - assert sizes[1] > sizes[9] - finally: - vu.default_compresslevel = original_compress_level def test_shape_zoom_affine(): From d1b66c5ba3525a5a724c15653d7a657b9871e255 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 18 Jun 2022 02:05:39 +0100 Subject: [PATCH 007/702] TEST: Update removalschedule --- nibabel/tests/test_removalschedule.py | 43 +++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 1563a588ec..30cd0f83d2 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -12,8 +12,25 @@ ] OBJECT_SCHEDULE = [ + ("6.0.0", [("nibabel.loadsave", "guessed_image_type"), + ("nibabel.loadsave", "read_img_data"), + ("nibabel.orientations", "flip_axis"), + ]), ("5.0.0", [("nibabel.pydicom_compat", "dicom_test"), - ("nibabel.onetime", "setattr_on_read")]), + ("nibabel.onetime", "setattr_on_read"), + ("nibabel.gifti.gifti", "data_tag"), + ("nibabel.gifti.giftiio", "read"), + ("nibabel.gifti.giftiio", "write"), + ("nibabel.gifti.parse_gifti_fast", "Outputter"), + ("nibabel.gifti.parse_gifti_fast", "parse_gifti_file"), + ("nibabel.imageclasses", "ext_map"), + ("nibabel.imageclasses", "class_map"), + ("nibabel.loadsave", "which_analyze_type"), + ("nibabel.volumeutils", "BinOpener"), + ("nibabel.volumeutils", "allopen"), + ("nibabel.orientations", "orientation_affine"), + ("nibabel.spatialimages", "Header"), + ]), ("4.0.0", [("nibabel.minc1", "MincFile"), ("nibabel.minc1", "MincImage")]), ("3.0.0", [("nibabel.testing", "catch_warn_reset")]), @@ -22,8 +39,30 @@ ] ATTRIBUTE_SCHEDULE = [ + ("7.0.0", [("nibabel.gifti.gifti", "GiftiMetaData", "from_dict"), + ("nibabel.gifti.gifti", "GiftiMetaData", "metadata"), + ]), ("5.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_data"), - ("nibabel.freesurfer.mghformat", "MGHHeader", "_header_data")]), + ("nibabel.freesurfer.mghformat", "MGHHeader", "_header_data"), + ("nibabel.gifti.gifti", "GiftiDataArray", "num_dim"), + ("nibabel.gifti.gifti", "GiftiDataArray", "from_array"), + ("nibabel.gifti.gifti", "GiftiDataArray", "to_xml_open"), + ("nibabel.gifti.gifti", "GiftiDataArray", "to_xml_close"), + ("nibabel.gifti.gifti", "GiftiDataArray", "get_metadata"), + ("nibabel.gifti.gifti", "GiftiImage", "get_labeltable"), + ("nibabel.gifti.gifti", "GiftiImage", "set_labeltable"), + ("nibabel.gifti.gifti", "GiftiImage", "get_metadata"), + ("nibabel.gifti.gifti", "GiftiImage", "set_metadata"), + ("nibabel.gifti.gifti", "GiftiImage", "getArraysFromIntent"), + ("nibabel.gifti.gifti", "GiftiImage", "getArraysFromIntent"), + ("nibabel.gifti.gifti", "GiftiMetaData", "get_metadata"), + ("nibabel.gifti.gifti", "GiftiLabel", "get_rgba"), + ("nibabel.nicom.dicomwrappers", "Wrapper", "get_affine"), + ("nibabel.streamlines.array_sequence", "ArraySequence", "data"), + ("nibabel.ecat", "EcatImage", "from_filespec"), + ("nibabel.filebasedimages", "FileBasedImage", "get_header"), + ("nibabel.spatialimages", "SpatialImage", "get_affine"), + ]), ("4.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_shape"), ("nibabel.filebasedimages", "FileBasedImage", "filespec_to_files"), ("nibabel.filebasedimages", "FileBasedImage", "to_filespec"), From f469c687ec10d599fa17a6f39afb4c7b512c8a2a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 02:41:00 +0100 Subject: [PATCH 008/702] DOC: Add skipped PR to changelog --- Changelog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Changelog b/Changelog index d3ead60db3..c485ab5966 100644 --- a/Changelog +++ b/Changelog @@ -32,6 +32,8 @@ New feature release in the 4.0.x series. 
New features ------------ +* ``nib-convert`` CLI tool to make image type and data dtype conversion accessible + via the command line. (pr/1113) (CM, reviewed by Ariel Rokem) * Add ``'mask'``, ``'compat'`` and ``'smallest'`` dtype aliases to NIfTI images to allow for dtype specifications that can depend on the contents of the data. ``'mask'`` is a synonym for ``uint8``. ``'compat'`` will find the nearest From 4e770240aef7851e86fd650fb42979c9373fa39a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 02:45:14 +0100 Subject: [PATCH 009/702] TEST: Suppress new numpy warning on nan-to-int cast --- nibabel/tests/test_volumeutils.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index b58cbeb60a..237c87eaf2 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -413,7 +413,7 @@ def test_a2f_nan2zero(): # How weird? Look at arr.astype(np.int64) with np.errstate(invalid='ignore'): data_back = write_return(arr, str_io, np.int64, nan2zero=False) - assert_array_equal(data_back, arr.astype(np.int64)) + assert_array_equal(data_back, arr.astype(np.int64)) def test_a2f_nan2zero_scaling(): @@ -692,9 +692,15 @@ def test_a2f_nan2zero_range(): write_return(arr_no_nan, fobj, np.int8, intercept=129) # OK with nan2zero false, but we get whatever nan casts to with pytest.warns(warn_type) if warn_type else error_warnings(): - nan_cast = np.array(np.nan, dtype=dt).astype(np.int8) + # XXX NP1.24 + # Casting nan to int will produce a RuntimeWarning in numpy 1.24 + # Change to expecting this warning when this becomes our minimum + with np.errstate(invalid='ignore'): + nan_cast = np.array(np.nan, dtype=dt).astype(np.int8) with pytest.warns(warn_type) if warn_type else error_warnings(): - back_arr = write_return(arr, fobj, np.int8, intercept=129, nan2zero=False) + # XXX NP1.24 - expect RuntimeWarning + with np.errstate(invalid='ignore'): + back_arr = write_return(arr, fobj, np.int8, intercept=129, nan2zero=False) assert_array_equal([-128, -128, -128, nan_cast], back_arr) # divslope with pytest.warns(warn_type) if warn_type else error_warnings(): @@ -706,8 +712,10 @@ def test_a2f_nan2zero_range(): write_return(arr_no_nan, fobj, np.int8, intercept=257.1, divslope=2) # OK with nan2zero false with pytest.warns(warn_type) if warn_type else error_warnings(): - back_arr = write_return(arr, fobj, np.int8, - intercept=257.1, divslope=2, nan2zero=False) + # XXX NP1.24 - expect RuntimeWarning + with np.errstate(invalid='ignore'): + back_arr = write_return(arr, fobj, np.int8, + intercept=257.1, divslope=2, nan2zero=False) assert_array_equal([-128, -128, -128, nan_cast], back_arr) From 7d3979d189b2f57d83d1abf9944bca95205d9077 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 18 Jun 2022 12:40:59 +0100 Subject: [PATCH 010/702] TEST: Better warnings checks, rather than comments for future improvements --- nibabel/tests/test_volumeutils.py | 38 +++++++++++++++---------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 237c87eaf2..4994f94e48 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -20,6 +20,7 @@ import bz2 import threading import time +from packaging.version import Version import numpy as np @@ -68,6 +69,8 @@ IUINT_TYPES = INT_TYPES + np.sctypes['uint'] NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES +FP_RUNTIME_WARN = Version(np.__version__) >= Version('1.24.0.dev0+239') + def test__is_compressed_fobj(): # _is_compressed helper function @@ -672,18 +675,21 @@ def test_a2f_nan2zero_range(): arr = np.array([-1, 0, 1, np.nan], dtype=dt) # Error occurs for arrays without nans too arr_no_nan = np.array([-1, 0, 1, 2], dtype=dt) - warn_type = np.ComplexWarning if np.issubdtype(dt, np.complexfloating) else None + complex_warn = (np.ComplexWarning,) if np.issubdtype(dt, np.complexfloating) else () + # Casting nan to int will produce a RuntimeWarning in numpy 1.24 + nan_warn = (RuntimeWarning,) if FP_RUNTIME_WARN else () + c_and_n_warn = complex_warn + nan_warn # No errors from explicit thresholding # mn thresholding excluding zero - with pytest.warns(warn_type) if warn_type else error_warnings(): + with pytest.warns(complex_warn) if complex_warn else error_warnings(): assert_array_equal([1, 1, 1, 0], write_return(arr, fobj, np.int8, mn=1)) # mx thresholding excluding zero - with pytest.warns(warn_type) if warn_type else error_warnings(): + with pytest.warns(complex_warn) if complex_warn else error_warnings(): assert_array_equal([-1, -1, -1, 0], write_return(arr, fobj, np.int8, mx=-1)) # Errors from datatype threshold after scaling - with pytest.warns(warn_type) if warn_type else error_warnings(): + with pytest.warns(complex_warn) if complex_warn else error_warnings(): back_arr = write_return(arr, fobj, np.int8, intercept=128) assert_array_equal([-128, -128, -127, -128], back_arr) with pytest.raises(ValueError): @@ -691,19 +697,13 @@ def test_a2f_nan2zero_range(): with pytest.raises(ValueError): write_return(arr_no_nan, fobj, np.int8, intercept=129) # OK with nan2zero false, but we get whatever nan casts to - with pytest.warns(warn_type) if warn_type else error_warnings(): - # XXX NP1.24 - # Casting nan to int will produce a RuntimeWarning in numpy 1.24 - # Change to expecting this warning when this becomes our minimum - with np.errstate(invalid='ignore'): - nan_cast = np.array(np.nan, dtype=dt).astype(np.int8) - with pytest.warns(warn_type) if warn_type else error_warnings(): - # XXX NP1.24 - expect RuntimeWarning - with np.errstate(invalid='ignore'): - back_arr = write_return(arr, fobj, np.int8, intercept=129, nan2zero=False) + with pytest.warns(c_and_n_warn) if c_and_n_warn else error_warnings(): + nan_cast = np.array(np.nan, dtype=dt).astype(np.int8) + with pytest.warns(c_and_n_warn) if c_and_n_warn else error_warnings(): + back_arr = write_return(arr, fobj, np.int8, intercept=129, nan2zero=False) assert_array_equal([-128, -128, -128, nan_cast], back_arr) # divslope - with pytest.warns(warn_type) if warn_type else error_warnings(): + with pytest.warns(complex_warn) if complex_warn else error_warnings(): back_arr = write_return(arr, fobj, np.int8, intercept=256, divslope=2) assert_array_equal([-128, -128, 
-128, -128], back_arr) with pytest.raises(ValueError): @@ -711,11 +711,9 @@ def test_a2f_nan2zero_range(): with pytest.raises(ValueError): write_return(arr_no_nan, fobj, np.int8, intercept=257.1, divslope=2) # OK with nan2zero false - with pytest.warns(warn_type) if warn_type else error_warnings(): - # XXX NP1.24 - expect RuntimeWarning - with np.errstate(invalid='ignore'): - back_arr = write_return(arr, fobj, np.int8, - intercept=257.1, divslope=2, nan2zero=False) + with pytest.warns(c_and_n_warn) if c_and_n_warn else error_warnings(): + back_arr = write_return(arr, fobj, np.int8, + intercept=257.1, divslope=2, nan2zero=False) assert_array_equal([-128, -128, -128, nan_cast], back_arr) From b0b16b811aaf7bb866ca764259c0cd20f8c03950 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 17:52:37 +0100 Subject: [PATCH 011/702] REL: 4.0.1 --- Changelog | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Changelog b/Changelog index c485ab5966..55586f87a3 100644 --- a/Changelog +++ b/Changelog @@ -25,6 +25,21 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. +4.0.1 (Saturday 18 June 2022) +============================= + +Bug-fix release in the 4.0.x series. + +Bug fixes +--------- +* Finalize 4.0 deprecations, converting tests expecting ``DeprecationWarning`` to + expected ``ExpiredDeprecationError`` (pr/1117) (CM) + +Maintenance +----------- +* Suppress new numpy warning on nan-to-int cast (pr/1118) (CM, reviewed by MB) + + 4.0.0 (Saturday 18 June 2022) ============================= From c90b75d44957650ad53d81980b056ec4663d8666 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 18:08:58 +0100 Subject: [PATCH 012/702] MNT: 4.0.2.dev0 --- nibabel/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/info.py b/nibabel/info.py index 2d10a7300a..fb952e9790 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -12,7 +12,7 @@ # development (pre-release) version. _version_major = 4 _version_minor = 0 -_version_micro = 1 +_version_micro = 2 _version_extra = '.dev0' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" From 29eb8941aa12c27ade6dc7da38f3cf9b1532137c Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 25 Jul 2022 21:57:43 -0400 Subject: [PATCH 013/702] ENH: Add minimal support for TRKv3 --- nibabel/streamlines/trk.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 6f9987e4ee..6b45aae122 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -593,7 +593,10 @@ def _read_header(fileobj): if header_rec['version'] == 1: # There is no 4x4 matrix for voxel to RAS transformation. header_rec[Field.VOXEL_TO_RASMM] = np.zeros((4, 4)) - elif header_rec['version'] == 2: + elif header_rec['version'] == 3: + warnings.warn('Parsing a TRK v3 file as v2. Some features may not ' + 'be handled correctly.', HeaderWarning) + elif header_rec['version'] in (2, 3): pass # Nothing more to do. else: raise HeaderError('NiBabel only supports versions 1 and 2 of ' From f7fb99ceabfa1126178ebf0caf2d42308bc367d7 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 25 Jul 2022 22:18:29 -0400 Subject: [PATCH 014/702] MNT: Expire ArrayWriter.to_fileobj(nan2zero=...) 
argument --- nibabel/arraywriters.py | 14 +++++++------- nibabel/tests/test_arraywriters.py | 26 ++++++++++++-------------- nibabel/tests/test_removalschedule.py | 1 + 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index c5c0efb706..8523d9fedd 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -29,13 +29,12 @@ def __init__(self, array, out_dtype=None) larger ints and smaller. """ -import warnings - import numpy as np from .casting import (int_to_float, as_int, int_abs, type_info, floor_exact, best_float, shared_range) from .volumeutils import finite_range, array_to_file +from .deprecator import ExpiredDeprecationError class WriterError(Exception): @@ -192,11 +191,12 @@ def _check_nan2zero(self, nan2zero): if nan2zero != self._nan2zero: raise WriterError('Deprecated `nan2zero` argument to `to_fileobj` ' 'must be same as class value set in __init__') - warnings.warn('Please remove `nan2zero` from call to ' '`to_fileobj` ' - 'and use in instance __init__ instead.\n' - '* deprecated in version: 2.0\n' - '* will raise error in version: 4.0\n', - DeprecationWarning, stacklevel=3) + raise ExpiredDeprecationError( + 'Please remove `nan2zero` from call to `to_fileobj` ' + 'and use in instance __init__ instead.\n' + '* deprecated in version: 2.0\n' + '* Raises ExpiredDeprecationError as of version: 4.0\n' + ) def _needs_nan2zero(self): """ True if nan2zero check needed for writing array """ diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 50a250d17c..df50b4cd6b 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -13,11 +13,11 @@ make_array_writer, get_slope_inter) from ..casting import int_abs, type_info, shared_range, on_powerpc from ..volumeutils import array_from_file, apply_read_scaling, _dt_min_max +from ..deprecator import ExpiredDeprecationError from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest -from ..testing import (assert_allclose_safely, suppress_warnings, - error_warnings) +from ..testing import assert_allclose_safely, suppress_warnings FLOAT_TYPES = np.sctypes['float'] @@ -506,12 +506,11 @@ def test_nan2zero(): aw = awt(arr, np.float32, **kwargs) data_back = round_trip(aw) assert_array_equal(np.isnan(data_back), [True, False]) - # Deprecation warning for nan2zero as argument to `to_fileobj` - with error_warnings(): - with pytest.deprecated_call(): - aw.to_fileobj(BytesIO(), 'F', True) - with pytest.deprecated_call(): - aw.to_fileobj(BytesIO(), 'F', nan2zero=True) + # Expired deprecation error for nan2zero as argument to `to_fileobj` + with pytest.raises(ExpiredDeprecationError): + aw.to_fileobj(BytesIO(), 'F', True) + with pytest.raises(ExpiredDeprecationError): + aw.to_fileobj(BytesIO(), 'F', nan2zero=True) # Error if nan2zero is not the value set at initialization with pytest.raises(WriterError): aw.to_fileobj(BytesIO(), 'F', False) @@ -528,12 +527,11 @@ def test_nan2zero(): data_back = round_trip(aw) astype_res = np.array(np.nan).astype(np.int32) assert_array_equal(data_back, [astype_res, 99]) - # Deprecation warning for nan2zero as argument to `to_fileobj` - with error_warnings(): - with pytest.deprecated_call(): - aw.to_fileobj(BytesIO(), 'F', False) - with pytest.deprecated_call(): - aw.to_fileobj(BytesIO(), 'F', nan2zero=False) + # Expired deprecation error for nan2zero as argument to `to_fileobj` + with pytest.raises(ExpiredDeprecationError): + aw.to_fileobj(BytesIO(), 'F', 
False) + with pytest.raises(ExpiredDeprecationError): + aw.to_fileobj(BytesIO(), 'F', nan2zero=False) # Error if nan2zero is not the value set at initialization with pytest.raises(WriterError): aw.to_fileobj(BytesIO(), 'F', True) diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 30cd0f83d2..c8809b9ca4 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -62,6 +62,7 @@ ("nibabel.ecat", "EcatImage", "from_filespec"), ("nibabel.filebasedimages", "FileBasedImage", "get_header"), ("nibabel.spatialimages", "SpatialImage", "get_affine"), + ("nibabel.arraywriters", "ArrayWriter", "_check_nan2zero"), ]), ("4.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_shape"), ("nibabel.filebasedimages", "FileBasedImage", "filespec_to_files"), From a7cd91f73e36844d556be1eab7bcd317ab13c53f Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 26 Jul 2022 08:56:33 -0400 Subject: [PATCH 015/702] TEST: Check old GiftiMetaData.data interface works --- nibabel/gifti/tests/test_gifti.py | 75 +++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 82cc8e25de..0ae6f24c34 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -245,6 +245,81 @@ def test_metadata(): md.get_metadata() +def test_metadata_list_interface(): + md = GiftiMetaData(key='value') + with pytest.warns(FutureWarning): + mdlist = md.data + assert len(mdlist) == 1 + assert mdlist[0].name == 'key' + assert mdlist[0].value == 'value' + + # Modify elements in-place + mdlist[0].name = 'foo' + assert mdlist[0].name == 'foo' + assert 'foo' in md + assert 'key' not in md + assert md['foo'] == 'value' + mdlist[0].value = 'bar' + assert mdlist[0].value == 'bar' + assert md['foo'] == 'bar' + + # Append new NVPair + nvpair = GiftiNVPairs('key', 'value') + mdlist.append(nvpair) + assert len(mdlist) == 2 + assert mdlist[1].name == 'key' + assert mdlist[1].value == 'value' + assert len(md) == 2 + assert md == {'foo': 'bar', 'key': 'value'} + + # Clearing empties both + mdlist.clear() + assert len(mdlist) == 0 + assert len(md) == 0 + + # Extension adds multiple keys + foobar = GiftiNVPairs('foo', 'bar') + mdlist.extend([nvpair, foobar]) + assert len(mdlist) == 2 + assert len(md) == 2 + assert md == {'key': 'value', 'foo': 'bar'} + + # Insertion updates list order, though we don't attempt to preserve it in the dict + lastone = GiftiNVPairs('last', 'one') + mdlist.insert(1, lastone) + assert len(mdlist) == 3 + assert len(md) == 3 + assert mdlist[1].name == 'last' + assert mdlist[1].value == 'one' + assert md == {'key': 'value', 'foo': 'bar', 'last': 'one'} + + # Popping returns a pair + mypair = mdlist.pop(0) + assert isinstance(mypair, GiftiNVPairs) + assert mypair.name == 'key' + assert mypair.value == 'value' + assert len(mdlist) == 2 + assert len(md) == 2 + assert 'key' not in md + assert md == {'foo': 'bar', 'last': 'one'} + # Modifying the pair now does not affect md + mypair.name = 'completelynew' + mypair.value = 'strings' + assert 'completelynew' not in md + assert md == {'foo': 'bar', 'last': 'one'} + # Check popping from the end (lastone inserted before foobar) + lastpair = mdlist.pop() + assert len(mdlist) == 1 + assert len(md) == 1 + assert md == {'last': 'one'} + + # And let's remove an old pair with a new object + lastoneagain = GiftiNVPairs('last', 'one') + mdlist.remove(lastoneagain) + assert len(mdlist) == 0 + 
assert len(md) == 0 + + def test_gifti_label_rgba(): rgba = np.random.rand(4) kwargs = dict(zip(['red', 'green', 'blue', 'alpha'], rgba)) From ed5c468b88a5b841aaf7b24c51fdcceb869955b0 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 26 Jul 2022 08:58:19 -0400 Subject: [PATCH 016/702] FIX: Make GiftiMetaData.data a list proxy The way to modify a metadata object used to be manipulating the .data list. Proxy calls to the list to ensure updates to the dict-like object now. --- nibabel/gifti/gifti.py | 74 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 3 deletions(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 6cd4e73c4f..474ae0f4bc 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -26,6 +26,42 @@ from ..deprecated import deprecate_with_version +class _GiftiMDList(list): + """List view of GiftiMetaData object that will translate most operations""" + def __init__(self, metadata): + self._md = metadata + super().__init__( + GiftiNVPairs._private_init(k, v, metadata) + for k, v in metadata.items() + ) + + def append(self, nvpair): + self._md[nvpair.name] = nvpair.value + super().append(nvpair) + + def clear(self): + super().clear() + self._md.clear() + + def extend(self, iterable): + for nvpair in iterable: + self.append(nvpair) + + def insert(self, index, nvpair): + self._md[nvpair.name] = nvpair.value + super().insert(index, nvpair) + + def pop(self, index=-1): + nvpair = super().pop(index) + nvpair._container = None + del self._md[nvpair.name] + return nvpair + + def remove(self, nvpair): + super().remove(nvpair) + del self._md[nvpair.name] + + class GiftiMetaData(CaretMetaData): """ A sequence of GiftiNVPairs containing metadata for a gifti data array """ @@ -76,7 +112,7 @@ def data(self): warnings.warn( "GiftiMetaData.data will be a dict in NiBabel 6.0.", FutureWarning, stacklevel=2) - return [GiftiNVPairs(k, v) for k, v in self._data.items()] + return _GiftiMDList(self) @classmethod @deprecate_with_version( @@ -114,8 +150,40 @@ class GiftiNVPairs: value : str """ def __init__(self, name=u'', value=u''): - self.name = name - self.value = value + self._name = name + self._value = value + self._container = None + + @classmethod + def _private_init(cls, name, value, md): + self = cls(name, value) + self._container = md + return self + + def __eq__(self, other): + if not isinstance(other, GiftiNVPairs): + return NotImplemented + return self.name == other.name and self.value == other.value + + @property + def name(self): + return self._name + + @name.setter + def name(self, key): + if self._container: + self._container[key] = self._container.pop(self._name) + self._name = key + + @property + def value(self): + return self._value + + @value.setter + def value(self, val): + if self._container: + self._container[self._name] = val + self._value = val class GiftiLabelTable(xml.XmlSerializable): From 301e0243af247d95a21cfdc2eda1e5f37fd309b2 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 26 Jul 2022 09:10:07 -0400 Subject: [PATCH 017/702] MNT: Deprecate GiftiMetaData.data and GiftiNVPairs These really should have been deprecated, instead of FutureWarning'd. Keeping them around will cause confusion at best. 
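
For reference, the supported replacement is plain mapping-style access on
``GiftiMetaData`` itself, roughly as follows (a minimal sketch; the keys and
values are illustrative):

    from nibabel.gifti.gifti import GiftiMetaData

    md = GiftiMetaData(key='value')    # construct from keyword arguments
    md['foo'] = 'bar'                  # dict-style set
    assert md['foo'] == 'bar'          # dict-style get
    assert dict(md) == {'key': 'value', 'foo': 'bar'}
    md.pop('foo')                      # standard MutableMapping methods apply
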
--- nibabel/gifti/gifti.py | 18 +++++++++++++----- nibabel/gifti/tests/test_gifti.py | 19 ++++++++++++------- nibabel/gifti/tests/test_parse_gifti_fast.py | 4 ++-- nibabel/tests/test_removalschedule.py | 3 +++ 4 files changed, 30 insertions(+), 14 deletions(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 474ae0f4bc..6082d3739b 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -108,10 +108,11 @@ def _sanitize(args, kwargs): return (), {pair.name: pair.value} @property + @deprecate_with_version( + 'The data attribute is deprecated. Use GiftiMetaData object ' + 'directly as a dict.', + '4.0', '6.0') def data(self): - warnings.warn( - "GiftiMetaData.data will be a dict in NiBabel 6.0.", - FutureWarning, stacklevel=2) return _GiftiMDList(self) @classmethod @@ -130,7 +131,7 @@ def get_metadata(self): @property @deprecate_with_version( - 'metadata property deprecated. Use GiftiMetadata object ' + 'metadata property deprecated. Use GiftiMetaData object ' 'as dict or pass to dict() for a standard dictionary.', '4.0', '6.0') def metadata(self): @@ -149,6 +150,10 @@ class GiftiNVPairs: name : str value : str """ + @deprecate_with_version( + 'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object ' + 'as a dict, instead.', + '4.0', '6.0') def __init__(self, name=u'', value=u''): self._name = name self._value = value @@ -156,7 +161,10 @@ def __init__(self, name=u'', value=u''): @classmethod def _private_init(cls, name, value, md): - self = cls(name, value) + """Private init method to provide warning-free experience""" + with warnings.catch_warnings(): + warnings.simplefilter('ignore', DeprecationWarning) + self = cls(name, value) self._container = md return self diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 0ae6f24c34..8249d01f92 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -228,7 +228,8 @@ def test_labeltable(): def test_metadata(): md = GiftiMetaData(key='value') # Old initialization methods - nvpair = GiftiNVPairs('key', 'value') + with pytest.warns(DeprecationWarning) as w: + nvpair = GiftiNVPairs('key', 'value') with pytest.warns(FutureWarning) as w: md2 = GiftiMetaData(nvpair=nvpair) assert len(w) == 1 @@ -236,7 +237,7 @@ def test_metadata(): md3 = GiftiMetaData.from_dict({'key': 'value'}) assert md == md2 == md3 == {'key': 'value'} # .data as a list of NVPairs is going away - with pytest.warns(FutureWarning) as w: + with pytest.warns(DeprecationWarning) as w: assert md.data[0].name == 'key' assert md.data[0].value == 'value' assert len(w) == 2 @@ -247,7 +248,7 @@ def test_metadata(): def test_metadata_list_interface(): md = GiftiMetaData(key='value') - with pytest.warns(FutureWarning): + with pytest.warns(DeprecationWarning): mdlist = md.data assert len(mdlist) == 1 assert mdlist[0].name == 'key' @@ -264,7 +265,8 @@ def test_metadata_list_interface(): assert md['foo'] == 'bar' # Append new NVPair - nvpair = GiftiNVPairs('key', 'value') + with pytest.warns(DeprecationWarning) as w: + nvpair = GiftiNVPairs('key', 'value') mdlist.append(nvpair) assert len(mdlist) == 2 assert mdlist[1].name == 'key' @@ -278,14 +280,16 @@ def test_metadata_list_interface(): assert len(md) == 0 # Extension adds multiple keys - foobar = GiftiNVPairs('foo', 'bar') + with pytest.warns(DeprecationWarning) as w: + foobar = GiftiNVPairs('foo', 'bar') mdlist.extend([nvpair, foobar]) assert len(mdlist) == 2 assert len(md) == 2 assert md == {'key': 'value', 'foo': 'bar'} # Insertion updates 
list order, though we don't attempt to preserve it in the dict - lastone = GiftiNVPairs('last', 'one') + with pytest.warns(DeprecationWarning) as w: + lastone = GiftiNVPairs('last', 'one') mdlist.insert(1, lastone) assert len(mdlist) == 3 assert len(md) == 3 @@ -314,7 +318,8 @@ def test_metadata_list_interface(): assert md == {'last': 'one'} # And let's remove an old pair with a new object - lastoneagain = GiftiNVPairs('last', 'one') + with pytest.warns(DeprecationWarning) as w: + lastoneagain = GiftiNVPairs('last', 'one') mdlist.remove(lastoneagain) assert len(mdlist) == 0 assert len(md) == 0 diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index b7ca2b7f4e..14a576d25b 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -147,7 +147,7 @@ def test_default_types(): # GiftiMetaData assert_default_types(img.meta) # GiftiNVPairs - Remove in NIB6 - with pytest.warns(FutureWarning): + with pytest.warns(DeprecationWarning): for nvpair in img.meta.data: assert_default_types(nvpair) # GiftiLabelTable @@ -161,7 +161,7 @@ def test_default_types(): # GiftiMetaData assert_default_types(darray.meta) # GiftiNVPairs - Remove in NIB6 - with pytest.warns(FutureWarning): + with pytest.warns(DeprecationWarning): for nvpair in darray.meta.data: assert_default_types(nvpair) diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 30cd0f83d2..46da846485 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -12,6 +12,8 @@ ] OBJECT_SCHEDULE = [ + ("7.0.0", [("nibabel.gifti.gifti", "GiftiNVPairs"), + ]), ("6.0.0", [("nibabel.loadsave", "guessed_image_type"), ("nibabel.loadsave", "read_img_data"), ("nibabel.orientations", "flip_axis"), @@ -41,6 +43,7 @@ ATTRIBUTE_SCHEDULE = [ ("7.0.0", [("nibabel.gifti.gifti", "GiftiMetaData", "from_dict"), ("nibabel.gifti.gifti", "GiftiMetaData", "metadata"), + ("nibabel.gifti.gifti", "GiftiMetaData", "data"), ]), ("5.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_data"), ("nibabel.freesurfer.mghformat", "MGHHeader", "_header_data"), From 4b432dad2b0fb3f8732f818fc68e439156ffaa2e Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 20 Aug 2022 09:14:07 -0400 Subject: [PATCH 018/702] STY: Spacing --- nibabel/ecat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 3af82e10f5..f87778fc6c 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -853,7 +853,7 @@ def get_data_dtype(self, frame): def shape(self): x, y, z = self._subheader.get_shape() nframes = self._subheader.get_nframes() - return(x, y, z, nframes) + return (x, y, z, nframes) def get_mlist(self): """ get access to the mlist From e19f35bb61a41e05fc0f05fb8baf2d856e45b039 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 20 Aug 2022 13:11:51 -0400 Subject: [PATCH 019/702] TEST: Expect new exception type from Matplotlib 3.6+ --- nibabel/tests/test_viewers.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py index 907a3bbb1e..fd1109eaff 100644 --- a/nibabel/tests/test_viewers.py +++ b/nibabel/tests/test_viewers.py @@ -55,7 +55,12 @@ def test_viewer(): v.clim = (0, 3) with pytest.raises(ValueError): OrthoSlicer3D.clim.fset(v, (0.,)) # bad limits - with pytest.raises(ValueError): + with pytest.raises( + ( + ValueError, # MPL3.5 and lower + KeyError, # MPL3.6 and higher + ) + ): OrthoSlicer3D.cmap.fset(v, 'foo') # wrong cmap # decrement/increment volume numbers via keypress From 8aec289b2622bc0f9bfb993bd1b07e3cdbfcd829 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 14 Aug 2022 16:12:29 -0400 Subject: [PATCH 020/702] ENH: Add from_url classmethod to SerializableImage --- nibabel/filebasedimages.py | 56 ++++++++++++++++++++++++++++++-------- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 21fe754edf..f346adbc29 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -10,6 +10,7 @@ import io from copy import deepcopy +from urllib import request from .fileholders import FileHolder from .filename_parser import (types_filenames, TypesFilenamesError, splitext_addext) @@ -488,7 +489,7 @@ def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): class SerializableImage(FileBasedImage): """ - Abstract image class for (de)serializing images to/from byte strings. + Abstract image class for (de)serializing images to/from byte streams/strings. The class doesn't define any image properties. 
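# For context, a minimal sketch of the byte-string round trip this class
# promises, using Nifti1Image as a stand-in for any single-file
# SerializableImage subclass (the array values are illustrative only):
#
#     import numpy as np
#     import nibabel as nib
#
#     img = nib.Nifti1Image(np.zeros((2, 3, 4), dtype=np.float32), np.eye(4))
#     blob = img.to_bytes()                      # serialize in memory
#     rt = nib.Nifti1Image.from_bytes(blob)      # deserialize
#     assert np.array_equal(rt.get_fdata(), img.get_fdata())
#
# from_url(), added in this patch, builds on the same single-file machinery.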
@@ -501,6 +502,7 @@ class SerializableImage(FileBasedImage): classmethods: * from_bytes(bytestring) - make instance by deserializing a byte string + * from_url(url) - make instance by fetching and deserializing a URL Loading from byte strings should provide round-trip equivalence: @@ -538,7 +540,30 @@ class SerializableImage(FileBasedImage): """ @classmethod - def from_bytes(klass, bytestring): + def _filemap_from_iobase(klass, ioobject: io.IOBase): + """For single-file image types, make a file map with the correct key""" + if len(klass.files_types) > 1: + raise NotImplementedError( + "(de)serialization is undefined for multi-file images" + ) + return klass.make_file_map({klass.files_types[0][0]: ioobject}) + + @classmethod + def _from_iobase(klass, ioobject: io.IOBase): + """Load image from readable IO stream + + Convert to BytesIO to enable seeking, if input stream is not seekable + """ + if not ioobject.seekable(): + ioobject = io.BytesIO(ioobject.read()) + return klass.from_file_map(klass._filemap_from_iobase(ioobject)) + + def _to_iobase(self, ioobject: io.IOBase, **kwargs): + """Save image from writable IO stream""" + self.to_file_map(self._filemap_from_iobase(ioobject), **kwargs) + + @classmethod + def from_bytes(klass, bytestring: bytes): """ Construct image from a byte string Class method @@ -548,13 +573,9 @@ def from_bytes(klass, bytestring): bstring : bytes Byte string containing the on-disk representation of an image """ - if len(klass.files_types) > 1: - raise NotImplementedError("from_bytes is undefined for multi-file images") - bio = io.BytesIO(bytestring) - file_map = klass.make_file_map({'image': bio, 'header': bio}) - return klass.from_file_map(file_map) + return klass._from_iobase(io.BytesIO(bytestring)) - def to_bytes(self, **kwargs): + def to_bytes(self, **kwargs) -> bytes: r""" Return a ``bytes`` object with the contents of the file that would be written if the image were saved. @@ -568,9 +589,20 @@ def to_bytes(self, **kwargs): bytes Serialized image """ - if len(self.__class__.files_types) > 1: - raise NotImplementedError("to_bytes() is undefined for multi-file images") bio = io.BytesIO() - file_map = self.make_file_map({'image': bio, 'header': bio}) - self.to_file_map(file_map, **kwargs) + self._to_iobase(bio, **kwargs) return bio.getvalue() + + @classmethod + def from_url(klass, url, timeout=5): + """Retrieve and load an image from a URL + + Class method + + Parameters + ---------- + url : str or urllib.request.Request object + URL of file to retrieve + """ + with request.urlopen(url, timeout=timeout) as response: + return klass._from_iobase(response) From 0303672980435271192bca811a7a5d372422ed6c Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 17 Aug 2022 07:42:01 -0400 Subject: [PATCH 021/702] TEST: Use pytest-httpserver to test validate_from_url --- nibabel/tests/test_image_api.py | 23 ++++++++++++++++++++++- setup.cfg | 1 + 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index afc04d709a..844a85f692 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -572,6 +572,28 @@ def validate_to_from_bytes(self, imaker, params): del img_a del img_b + @pytest.fixture(autouse=True) + def setup(self, httpserver): + """Make pytest fixtures available to validate functions""" + self.httpserver = httpserver + + def validate_from_url(self, imaker, params): + server = self.httpserver + + img = imaker() + img_bytes = img.to_bytes() + + server.expect_oneshot_request("/img").respond_with_data(img_bytes) + url = server.url_for("/img") + assert url.startswith("http://") # Check we'll trigger an HTTP handler + rt_img = img.__class__.from_url(url) + + assert rt_img.to_bytes() == img_bytes + assert self._header_eq(img.header, rt_img.header) + assert np.array_equal(img.get_fdata(), rt_img.get_fdata()) + del img + del rt_img + @staticmethod def _header_eq(header_a, header_b): """ Header equality check that can be overridden by a subclass of this test @@ -583,7 +605,6 @@ def _header_eq(header_a, header_b): return header_a == header_b - class LoadImageAPI(GenericImageAPI, DataInterfaceMixin, AffineMixin, diff --git a/setup.cfg b/setup.cfg index 4defb7eb14..47a7317088 100644 --- a/setup.cfg +++ b/setup.cfg @@ -61,6 +61,7 @@ test = pytest !=5.3.4 pytest-cov pytest-doctestplus + pytest-httpserver zstd = pyzstd >= 0.14.3 all = From d79dddaaafcff0bb2af672e5894656c4fd38e4c2 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 20 Aug 2022 09:11:38 -0400 Subject: [PATCH 022/702] TEST: Check file:/// URLs --- nibabel/tests/test_image_api.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 844a85f692..a83be12f53 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -573,9 +573,10 @@ def validate_to_from_bytes(self, imaker, params): del img_b @pytest.fixture(autouse=True) - def setup(self, httpserver): + def setup(self, httpserver, tmp_path): """Make pytest fixtures available to validate functions""" self.httpserver = httpserver + self.tmp_path = tmp_path def validate_from_url(self, imaker, params): server = self.httpserver @@ -594,6 +595,22 @@ def validate_from_url(self, imaker, params): del img del rt_img + def validate_from_file_url(self, imaker, params): + tmp_path = self.tmp_path + + img = imaker() + import uuid + fname = tmp_path / f'img-{uuid.uuid4()}{self.standard_extension}' + img.to_filename(fname) + + rt_img = img.__class__.from_url(f"file:///{fname}") + + assert self._header_eq(img.header, rt_img.header) + assert np.array_equal(img.get_fdata(), rt_img.get_fdata()) + del img + del rt_img + + @staticmethod def _header_eq(header_a, header_b): """ Header equality check that can be overridden by a subclass of this test From 895068d2dc567808786b5a47109d2ff88e999c72 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 20 Aug 2022 09:13:47 -0400 Subject: [PATCH 023/702] FIX: Do not close seekable URL streams --- nibabel/filebasedimages.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index f346adbc29..9389b938a6 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -604,5 +604,5 @@ def from_url(klass, url, timeout=5): url : str or urllib.request.Request object URL of file to retrieve """ - with request.urlopen(url, timeout=timeout) as response: - return klass._from_iobase(response) + response = request.urlopen(url, timeout=timeout) + return klass._from_iobase(response) From 59f6db400ae65c6228803be04ad2edf0459de974 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 30 Aug 2022 13:38:10 -0400 Subject: [PATCH 024/702] TEST: Check that serialization fails as expected for multi-part images --- nibabel/tests/test_filebasedimages.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/nibabel/tests/test_filebasedimages.py b/nibabel/tests/test_filebasedimages.py index efac76a65a..d01440eb65 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -5,6 +5,7 @@ import warnings import numpy as np +import pytest from ..filebasedimages import FileBasedHeader, FileBasedImage, SerializableImage @@ -127,3 +128,24 @@ def __init__(self, seq=None): hdr4 = H.from_header(None) assert isinstance(hdr4, H) assert hdr4.a_list == [] + + +class MultipartNumpyImage(FBNumpyImage): + # We won't actually try to write these out, just need to test an edge case + files_types = (('header', '.hdr'), ('image', '.npy')) + + +class SerializableMPNumpyImage(MultipartNumpyImage, SerializableImage): + pass + + +def test_multifile_stream_failure(): + shape = (2, 3, 4) + arr = np.arange(np.prod(shape), dtype=np.float32).reshape(shape) + img = SerializableMPNumpyImage(arr) + with pytest.raises(NotImplementedError): + img.to_bytes() + img = SerializableNumpyImage(arr) + bstr = img.to_bytes() + with pytest.raises(NotImplementedError): + SerializableMPNumpyImage.from_bytes(bstr) From 293d8caa1e8c2bf4d5462d4d05f2fdab6a40fdc2 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 30 Aug 2022 13:38:37 -0400 Subject: [PATCH 025/702] STY: Rename ioobject to io_obj for clarity --- nibabel/filebasedimages.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 9389b938a6..f7dafe7d1f 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -540,27 +540,27 @@ class SerializableImage(FileBasedImage): """ @classmethod - def _filemap_from_iobase(klass, ioobject: io.IOBase): + def _filemap_from_iobase(klass, io_obj: io.IOBase): """For single-file image types, make a file map with the correct key""" if len(klass.files_types) > 1: raise NotImplementedError( "(de)serialization is undefined for multi-file images" ) - return klass.make_file_map({klass.files_types[0][0]: ioobject}) + return klass.make_file_map({klass.files_types[0][0]: io_obj}) @classmethod - def _from_iobase(klass, ioobject: io.IOBase): + def _from_iobase(klass, io_obj: io.IOBase): """Load image from readable IO stream Convert to BytesIO to enable seeking, if input stream is not seekable """ - if not ioobject.seekable(): - ioobject = io.BytesIO(ioobject.read()) - return klass.from_file_map(klass._filemap_from_iobase(ioobject)) + if not io_obj.seekable(): + io_obj = io.BytesIO(io_obj.read()) + return klass.from_file_map(klass._filemap_from_iobase(io_obj)) - def _to_iobase(self, ioobject: io.IOBase, **kwargs): + def _to_iobase(self, io_obj: io.IOBase, **kwargs): """Save image from writable IO stream""" - self.to_file_map(self._filemap_from_iobase(ioobject), **kwargs) + self.to_file_map(self._filemap_from_iobase(io_obj), **kwargs) @classmethod def from_bytes(klass, bytestring: bytes): From c3e8d4fc914eea316b07462d9a5335031e578ead Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 30 Aug 2022 15:56:46 -0400 Subject: [PATCH 026/702] ENH: Expose to/from_stream methods --- nibabel/filebasedimages.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index f7dafe7d1f..b08f5e74d4 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -549,17 +549,30 @@ def _filemap_from_iobase(klass, io_obj: io.IOBase): return klass.make_file_map({klass.files_types[0][0]: io_obj}) @classmethod - def _from_iobase(klass, io_obj: io.IOBase): + def from_stream(klass, io_obj: io.IOBase): """Load image from readable IO stream Convert to BytesIO to enable seeking, if input stream is not seekable + + Parameters + ---------- + io_obj : IOBase object + Readable stream """ if not io_obj.seekable(): io_obj = io.BytesIO(io_obj.read()) return klass.from_file_map(klass._filemap_from_iobase(io_obj)) - def _to_iobase(self, io_obj: io.IOBase, **kwargs): - """Save image from writable IO stream""" + def to_stream(self, io_obj: io.IOBase, **kwargs): + """Save image to writable IO stream + + Parameters + ---------- + io_obj : IOBase object + Writable stream + \*\*kwargs : keyword arguments + Keyword arguments that may be passed to ``img.to_file_map()`` + """ self.to_file_map(self._filemap_from_iobase(io_obj), **kwargs) @classmethod @@ -573,7 +586,7 @@ def from_bytes(klass, bytestring: bytes): bstring : bytes Byte string containing the on-disk representation of an image """ - return klass._from_iobase(io.BytesIO(bytestring)) + return klass.from_stream(io.BytesIO(bytestring)) def to_bytes(self, **kwargs) -> bytes: r""" Return a ``bytes`` object with the contents of the file that would @@ -590,7 +603,7 @@ def to_bytes(self, **kwargs) -> bytes: Serialized image """ bio = io.BytesIO() - self._to_iobase(bio, **kwargs) + self.to_stream(bio, **kwargs) return bio.getvalue() @classmethod @@ -603,6 +616,8 @@ def from_url(klass, url, timeout=5): ---------- url : str or urllib.request.Request object URL of file to retrieve + timeout : float, optional + Time (in seconds) to wait for a response """ response = request.urlopen(url, timeout=timeout) - return klass._from_iobase(response) + return klass.from_stream(response) From 82c50ba0337e3b073920b04c3b48c3b390fb9696 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 30 Aug 2022 15:57:41 -0400 Subject: [PATCH 027/702] TEST: Test streams (replaces some to/from_bytes() tests) --- nibabel/tests/test_image_api.py | 44 +++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index a83be12f53..e4287988d7 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -26,6 +26,7 @@ import warnings from functools import partial from itertools import product +import io import pathlib import numpy as np @@ -523,34 +524,41 @@ def validate_affine_deprecated(self, imaker, params): img.get_affine() -class SerializeMixin(object): - def validate_to_bytes(self, imaker, params): +class SerializeMixin: + def validate_to_from_stream(self, imaker, params): img = imaker() - serialized = img.to_bytes() - with InTemporaryDirectory(): - fname = 'img' + self.standard_extension - img.to_filename(fname) - with open(fname, 'rb') as fobj: - file_contents = fobj.read() - assert serialized == file_contents + klass = getattr(self, 'klass', img.__class__) + stream = io.BytesIO() + img.to_stream(stream) + + rt_img = klass.from_stream(stream) + assert self._header_eq(img.header, rt_img.header) + assert np.array_equal(img.get_fdata(), rt_img.get_fdata()) - def validate_from_bytes(self, imaker, params): + def validate_file_stream_equivalence(self, imaker, params): img = imaker() klass = getattr(self, 'klass', img.__class__) with InTemporaryDirectory(): fname = 'img' + self.standard_extension img.to_filename(fname) - all_images = list(getattr(self, 'example_images', [])) + [{'fname': fname}] - for img_params in all_images: - img_a = klass.from_filename(img_params['fname']) - with open(img_params['fname'], 'rb') as fobj: - img_b = klass.from_bytes(fobj.read()) + with open("stream", "wb") as fobj: + img.to_stream(fobj) - assert self._header_eq(img_a.header, img_b.header) + # Check that writing gets us the same thing + contents1 = pathlib.Path(fname).read_bytes() + contents2 = pathlib.Path("stream").read_bytes() + assert contents1 == contents2 + + # Check that reading gets us the same thing + img_a = klass.from_filename(fname) + with open(fname, "rb") as fobj: + img_b = klass.from_stream(fobj) + # This needs to happen while the filehandle is open assert np.array_equal(img_a.get_fdata(), img_b.get_fdata()) - del img_a - del img_b + assert self._header_eq(img_a.header, img_b.header) + del img_a + del img_b def validate_to_from_bytes(self, imaker, params): img = imaker() From 0efe615554874c8d6c1dd3934e22a91f9ffa297f Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 31 Aug 2022 09:39:12 -0400 Subject: [PATCH 028/702] REL: 4.0.2 --- Changelog | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Changelog b/Changelog index 55586f87a3..8cb27e84f1 100644 --- a/Changelog +++ b/Changelog @@ -25,6 +25,22 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. +4.0.2 (Wednesday 31 August 2022) +================================ + +Bug-fix release in the 4.0.x series. 
+ +Bug fixes +--------- +* Make ``GiftiMetaData.data`` a list proxy, deprecate (pr/1127) (CM, reviewed + by Hao-Ting Wang) + +Maintenance +----------- +* Finalize deprecation of ``ArrayWriter.to_fileobj(nan2zero=...)`` argument + (pr/1126) (CM) + + 4.0.1 (Saturday 18 June 2022) ============================= From e3f55dfe444cade77a97d28df0d345b520a86943 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 31 Aug 2022 09:46:09 -0400 Subject: [PATCH 029/702] MNT: 4.0.3.dev0 --- nibabel/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/info.py b/nibabel/info.py index fb952e9790..426d91d762 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -12,7 +12,7 @@ # development (pre-release) version. _version_major = 4 _version_minor = 0 -_version_micro = 2 +_version_micro = 3 _version_extra = '.dev0' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" From ea68dd1697a896490d9131f540956eab8d30beb3 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 20 Aug 2022 09:24:56 -0400 Subject: [PATCH 030/702] ENH: Make layout order an initialization parameter of ArrayProxy --- nibabel/arrayproxy.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 6cda3a206a..9546360ca7 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -53,7 +53,7 @@ KEEP_FILE_OPEN_DEFAULT = False -class ArrayProxy(object): +class ArrayProxy: """ Class to act as proxy for the array that can be read from a file The array proxy allows us to freeze the passed fileobj and header such that @@ -83,10 +83,9 @@ class ArrayProxy(object): See :mod:`nibabel.minc1`, :mod:`nibabel.ecat` and :mod:`nibabel.parrec` for examples. """ - # Assume Fortran array memory layout order = 'F' - def __init__(self, file_like, spec, *, mmap=True, keep_file_open=None): + def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=None): """Initialize array proxy instance Parameters @@ -116,6 +115,10 @@ def __init__(self, file_like, spec, *, mmap=True, keep_file_open=None): True gives the same behavior as ``mmap='c'``. If `file_like` cannot be memory-mapped, ignore `mmap` value and read array from file. + order : {'F', 'C'}, optional, keyword only + `order` controls the order of the data array layout. Fortran-style, + column-major order may be indicated with 'F', and C-style, row-major + order may be indicated with 'C'. The default order is 'F'. keep_file_open : { None, True, False }, optional, keyword only `keep_file_open` controls whether a new file handle is created every time the image is accessed, or a single file handle is @@ -128,6 +131,8 @@ def __init__(self, file_like, spec, *, mmap=True, keep_file_open=None): """ if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") + if order not in (None, 'C', 'F'): + raise ValueError("order should be one of {'C', 'F'}") self.file_like = file_like if hasattr(spec, 'get_data_shape'): slope, inter = spec.get_slope_inter() @@ -147,6 +152,8 @@ def __init__(self, file_like, spec, *, mmap=True, keep_file_open=None): # Permit any specifier that can be interpreted as a numpy dtype self._dtype = np.dtype(self._dtype) self._mmap = mmap + if order is not None: + self.order = order # Flags to keep track of whether a single ImageOpener is created, and # whether a single underlying file handle is created. 
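# For context, a minimal sketch of what the new keyword allows, assuming
# `fobj` is a file-like object holding C-ordered data described by a header
# `hdr`:
#
#     prox = ArrayProxy(fobj, hdr, order='C')  # row-major reads
#     prox = ArrayProxy(fobj, hdr)             # order=None -> class default 'F'
#
# i.e. the memory layout becomes a per-instance choice, rather than requiring
# a one-off ArrayProxy subclass that overrides the `order` class variable.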
self._keep_file_open, self._persist_opener = \ From 015559845a2217204e546d797a9f8eb25fe9e54d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 20 Aug 2022 12:53:47 -0400 Subject: [PATCH 031/702] TEST: Refactor test loops as parameterizations --- nibabel/tests/test_arrayproxy.py | 47 +++++++++++++++++++------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 80806cae3a..deaa6f4e11 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -154,27 +154,36 @@ def test_nifti1_init(): assert_array_equal(np.asarray(ap), arr * 2.0 + 10) -def test_proxy_slicing(): - shapes = (15, 16, 17) - for n_dim in range(1, len(shapes) + 1): - shape = shapes[:n_dim] - arr = np.arange(np.prod(shape)).reshape(shape) - for offset in (0, 20): - hdr = Nifti1Header() - hdr.set_data_offset(offset) - hdr.set_data_dtype(arr.dtype) - hdr.set_data_shape(shape) - for order, klass in ('F', ArrayProxy), ('C', CArrayProxy): - fobj = BytesIO() - fobj.write(b'\0' * offset) - fobj.write(arr.tobytes(order=order)) - prox = klass(fobj, hdr) - for sliceobj in slicer_samples(shape): - assert_array_equal(arr[sliceobj], prox[sliceobj]) - # Check slicing works with scaling +@pytest.mark.parametrize("n_dim", (1, 2, 3)) +@pytest.mark.parametrize("offset", (0, 20)) +def test_proxy_slicing(n_dim, offset): + shape = (15, 16, 17)[:n_dim] + arr = np.arange(np.prod(shape)).reshape(shape) + hdr = Nifti1Header() + hdr.set_data_offset(offset) + hdr.set_data_dtype(arr.dtype) + hdr.set_data_shape(shape) + for order, klass in ('F', ArrayProxy), ('C', CArrayProxy): + fobj = BytesIO() + fobj.write(b'\0' * offset) + fobj.write(arr.tobytes(order=order)) + prox = klass(fobj, hdr) + assert prox.order == order + for sliceobj in slicer_samples(shape): + assert_array_equal(arr[sliceobj], prox[sliceobj]) + + +def test_proxy_slicing_with_scaling(): + shape = (15, 16, 17) + offset = 20 + arr = np.arange(np.prod(shape)).reshape(shape) + hdr = Nifti1Header() + hdr.set_data_offset(offset) + hdr.set_data_dtype(arr.dtype) + hdr.set_data_shape(shape) hdr.set_slope_inter(2.0, 1.0) fobj = BytesIO() - fobj.write(b'\0' * offset) + fobj.write(bytes(offset)) fobj.write(arr.tobytes(order='F')) prox = ArrayProxy(fobj, hdr) sliceobj = (None, slice(None), 1, -1) From 47209327e6f9da864d5f88facd2a84aa1a853a38 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 20 Aug 2022 12:54:05 -0400 Subject: [PATCH 032/702] TEST: Test order kwarg to ArrayProxy classes --- nibabel/tests/test_arrayproxy.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index deaa6f4e11..c4c44f72f2 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -190,6 +190,19 @@ def test_proxy_slicing_with_scaling(): assert_array_equal(arr[sliceobj] * 2.0 + 1.0, prox[sliceobj]) +@pytest.mark.parametrize("order", ("C", "F")) +def test_order_override(order): + shape = (15, 16, 17) + arr = np.arange(np.prod(shape)).reshape(shape) + fobj = BytesIO() + fobj.write(arr.tobytes(order=order)) + for klass in (ArrayProxy, CArrayProxy): + prox = klass(fobj, (shape, arr.dtype), order=order) + assert prox.order == order + sliceobj = (None, slice(None), 1, -1) + assert_array_equal(arr[sliceobj], prox[sliceobj]) + + def test_is_proxy(): # Test is_proxy function hdr = FunkyHeader((2, 3, 4)) From df252cad2cbb4cbcb7624674d54c2460715baf81 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 30 Aug 2022 16:12:31 -0400 Subject: [PATCH 033/702] Update nibabel/arrayproxy.py Co-authored-by: Matthew Brett --- nibabel/arrayproxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 9546360ca7..230ad00272 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -115,7 +115,7 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non True gives the same behavior as ``mmap='c'``. If `file_like` cannot be memory-mapped, ignore `mmap` value and read array from file. - order : {'F', 'C'}, optional, keyword only + order : {None, 'F', 'C'}, optional, keyword only `order` controls the order of the data array layout. Fortran-style, column-major order may be indicated with 'F', and C-style, row-major order may be indicated with 'C'. The default order is 'F'. From 3cfe3370e5f2dfd55e175553af70ebb1f3d5a87b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 30 Aug 2022 17:08:38 -0400 Subject: [PATCH 034/702] RF: Change ArrayProxy.order class var to _default_order, deprecate order --- nibabel/arrayproxy.py | 19 +++++++++++++--- nibabel/tests/test_arrayproxy.py | 39 ++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 230ad00272..4b8287194e 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -27,6 +27,7 @@ """ from contextlib import contextmanager from threading import RLock +import warnings import numpy as np @@ -83,7 +84,7 @@ class ArrayProxy: See :mod:`nibabel.minc1`, :mod:`nibabel.ecat` and :mod:`nibabel.parrec` for examples. """ - order = 'F' + _default_order = 'F' def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=None): """Initialize array proxy instance @@ -147,13 +148,25 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non else: raise TypeError('spec must be tuple of length 2-5 or header object') + # Warn downstream users that the class variable order is going away + if hasattr(self.__class__, 'order'): + warnings.warn(f'Class {self.__class__} has an `order` class variable. 
' + 'ArrayProxy subclasses should rename this variable to `_default_order` ' + 'to avoid conflict with instance variables.\n' + '* deprecated in version: 5.0\n' + '* will raise error in version: 7.0\n', + DeprecationWarning, stacklevel=2) + # Override _default_order with order, to follow intent of subclasser + self._default_order = self.order + # Copies of values needed to read array self._shape, self._dtype, self._offset, self._slope, self._inter = par # Permit any specifier that can be interpreted as a numpy dtype self._dtype = np.dtype(self._dtype) self._mmap = mmap - if order is not None: - self.order = order + if order is None: + order = self._default_order + self.order = order # Flags to keep track of whether a single ImageOpener is created, and # whether a single underlying file handle is created. self._keep_file_open, self._persist_opener = \ diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index c4c44f72f2..4bbbe31abd 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -15,13 +15,16 @@ import pickle from io import BytesIO +from packaging.version import Version from ..tmpdirs import InTemporaryDirectory import numpy as np +from .. import __version__ from ..arrayproxy import (ArrayProxy, is_proxy, reshape_dataobj, get_obj_dtype) from ..openers import ImageOpener from ..nifti1 import Nifti1Header +from ..deprecator import ExpiredDeprecationError from unittest import mock @@ -57,6 +60,10 @@ def copy(self): class CArrayProxy(ArrayProxy): # C array memory layout + _default_order = 'C' + + +class DeprecatedCArrayProxy(ArrayProxy): order = 'C' @@ -203,6 +210,38 @@ def test_order_override(order): assert_array_equal(arr[sliceobj], prox[sliceobj]) +def test_deprecated_order_classvar(): + shape = (15, 16, 17) + arr = np.arange(np.prod(shape)).reshape(shape) + fobj = BytesIO() + fobj.write(arr.tobytes(order='C')) + sliceobj = (None, slice(None), 1, -1) + + # We don't really care about the original order, just that the behavior + # of the deprecated mode matches the new behavior + fprox = ArrayProxy(fobj, (shape, arr.dtype), order='F') + cprox = ArrayProxy(fobj, (shape, arr.dtype), order='C') + + # Start raising errors when we crank the dev version + if Version(__version__) >= Version('7.0.0.dev0'): + cm = pytest.raises(ExpiredDeprecationError) + else: + cm = pytest.deprecated_call() + + with cm: + prox = DeprecatedCArrayProxy(fobj, (shape, arr.dtype)) + assert prox.order == 'C' + assert_array_equal(prox[sliceobj], cprox[sliceobj]) + with cm: + prox = DeprecatedCArrayProxy(fobj, (shape, arr.dtype), order='C') + assert prox.order == 'C' + assert_array_equal(prox[sliceobj], cprox[sliceobj]) + with cm: + prox = DeprecatedCArrayProxy(fobj, (shape, arr.dtype), order='F') + assert prox.order == 'F' + assert_array_equal(prox[sliceobj], fprox[sliceobj]) + + def test_is_proxy(): # Test is_proxy function hdr = FunkyHeader((2, 3, 4)) From dcf95664710feebd0a18aed305626a5ecb733c67 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 31 Aug 2022 09:54:02 -0400 Subject: [PATCH 035/702] TEST: Check None and invalid order arguments --- nibabel/tests/test_arrayproxy.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 4bbbe31abd..fdf807654a 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -88,6 +88,9 @@ def test_init(): assert ap.shape != shape # Data stays the same, also assert_array_equal(np.asarray(ap), arr) + # You wouldn't do this, but order=None explicitly requests the default order + ap2 = ArrayProxy(bio, FunkyHeader(arr.shape), order=None) + assert_array_equal(np.asarray(ap2), arr) # C order also possible bio = BytesIO() bio.seek(16) @@ -97,6 +100,8 @@ def test_init(): # Illegal init with pytest.raises(TypeError): ArrayProxy(bio, object()) + with pytest.raises(ValueError): + ArrayProxy(bio, hdr, order='badval') def test_tuplespec(): From 0dbfb67139da8357683b81ee76a882912ef13be0 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 4 Sep 2022 15:35:17 -0400 Subject: [PATCH 036/702] CI: Update action versions --- .github/workflows/misc.yml | 6 +++--- .github/workflows/pre-release.yml | 6 +++--- .github/workflows/stable.yml | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 11ed7ee4d8..5317b3d811 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -35,12 +35,12 @@ jobs: EXTRA_PIP_FLAGS: ${{ matrix.pip-flags }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: recursive fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -63,7 +63,7 @@ jobs: run: tools/ci/submit_coverage.sh if: ${{ always() }} - name: Upload pytest test results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} path: for_testing/test-results.xml diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 6f5c2a1aa0..4979713fe0 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -56,12 +56,12 @@ jobs: EXTRA_PIP_FLAGS: ${{ matrix.pip-flags }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: recursive fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -84,7 +84,7 @@ jobs: run: tools/ci/submit_coverage.sh if: ${{ always() }} - name: Upload pytest test results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} path: for_testing/test-results.xml diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index dfc9649710..56a71b44f4 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -103,12 +103,12 @@ jobs: EXTRA_PIP_FLAGS: ${{ matrix.pip-flags }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: recursive fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ 
matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -131,7 +131,7 @@ jobs: run: tools/ci/submit_coverage.sh if: ${{ always() }} - name: Upload pytest test results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} path: for_testing/test-results.xml From f1678f86bf7c69f6435c92744e9d4c14744bf408 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 4 Sep 2022 20:05:16 -0400 Subject: [PATCH 037/702] CI: Build, test and deploy packages --- .github/workflows/stable.yml | 67 ++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 56a71b44f4..fbf76a4124 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -23,6 +23,59 @@ defaults: shell: bash jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - uses: actions/setup-python@v4 + with: + python-version: 3 + - run: pip install --upgrade build twine + - name: Build sdist and wheel + run: python -m build + - run: twine check dist/* + - name: Build git archive + run: git archive -v -o dist/nibabel-archive.tgz HEAD + - uses: actions/upload-artifact@v3 + with: + name: dist + path: dist/ + + test-package: + runs-on: ubuntu-latest + needs: [build] + strategy: + matrix: + package: ['wheel', 'sdist', 'archive'] + steps: + - uses: actions/download-artifact@v3 + with: + name: dist + path: dist/ + - uses: actions/setup-python@v4 + with: + python-version: 3 + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Update pip + run: pip install --upgrade pip + - name: Install wheel + run: pip install dist/nibabel-*.whl + if: matrix.package == 'wheel' + - name: Install sdist + run: pip install dist/nibabel-*.tar.gz + if: matrix.package == 'sdist' + - name: Install archive + run: pip install dist/nibabel-archive.tgz + if: matrix.package == 'archive' + - run: python -c 'import nibabel; print(nibabel.__version__)' + - name: Install test extras + run: pip install nibabel[test] + - name: Run tests + run: pytest --doctest-modules --doctest-plus -v --pyargs nibabel + stable: # Check each OS, all supported Python, minimum versions and latest releases runs-on: ${{ matrix.os }} @@ -136,3 +189,17 @@ jobs: name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} path: for_testing/test-results.xml if: ${{ always() && matrix.check == 'test' }} + + publish: + runs-on: ubuntu-latest + needs: [stable, test-package] + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') + steps: + - uses: actions/download-artifact@v3 + with: + name: dist + path: dist/ + - uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} From ae0441bf7f19c2e83365e87639719def05d7f356 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 4 Sep 2022 20:33:13 -0400 Subject: [PATCH 038/702] CI: Drop old wheel/sdist/archive tests --- .github/workflows/stable.yml | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index fbf76a4124..8ad47eb0b1 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -121,28 +121,6 @@ jobs: check: skiptests pip-flags: '' depends: '' - # Check all installation methods - - os: ubuntu-latest - python-version: "3.10" - install: wheel - check: test - pip-flags: '' - depends: REQUIREMENTS - optional-depends: DEFAULT_OPT_DEPENDS - - os: ubuntu-latest - python-version: "3.10" - install: sdist - check: test - pip-flags: '' - depends: REQUIREMENTS - optional-depends: DEFAULT_OPT_DEPENDS - - os: ubuntu-latest - python-version: "3.10" - install: archive - check: test - pip-flags: '' - depends: REQUIREMENTS - optional-depends: DEFAULT_OPT_DEPENDS exclude: - os: ubuntu-latest architecture: x86 From 28ccfc404c2751b842fdbd5af5d82a25fa30e503 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 6 Sep 2022 10:08:54 -0400 Subject: [PATCH 039/702] CI: Run on tags --- .github/workflows/stable.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 8ad47eb0b1..c7fc86ad22 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -11,6 +11,8 @@ on: branches: - master - maint/* + tags: + - "*" pull_request: branches: - master From bc8e0c75111e883e732eaedca590b8e8092c213a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 6 Sep 2022 10:23:36 -0400 Subject: [PATCH 040/702] CI: Use package deployment environment to insert a manual review stage --- .github/workflows/stable.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index c7fc86ad22..9086dab9d9 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -172,6 +172,7 @@ jobs: publish: runs-on: ubuntu-latest + environment: "Package deployment" needs: [stable, test-package] if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: From 58122a84a177633410973b06522e591727d82904 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 4 Sep 2022 10:42:00 -0400 Subject: [PATCH 041/702] MNT: Move most setup metadata into pyproject.toml --- pyproject.toml | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++ setup.cfg | 58 ----------------------------------------------- setup.py | 33 +++++++++++++++------------ 3 files changed, 80 insertions(+), 72 deletions(-) create mode 100644 pyproject.toml diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..39329568bf --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,61 @@ +[build-system] +requires = ["setuptools"] +build-backend = "setuptools.build_meta:__legacy__" + +[project] +name = "nibabel" +description = "Access a multitude of neuroimaging data formats" +authors = [ + { name = "nibabel developers", email = "neuroimaging@python.org" }, +] +maintainers = [ + { name = "Christopher Markiewicz" }, +] +readme = "README.rst" +license = { text="MIT License" } +requires-python = ">=3.7" +dependencies = ["numpy >=1.17", "packaging >=17", "setuptools"] +classifiers = [ + "Development Status :: 4 - Beta", + "Environment :: Console", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Topic :: Scientific/Engineering", +] +# Version from versioneer +# optional-dependencies from setup.cfg (using ConfigParser features) +dynamic = ["version", "optional-dependencies"] + +[project.urls] +"Homepage" = "https://nipy.org/nibabel" +"Development" = "https://github.com/nipy/nibabel" + +[project.scripts] +nib-conform = "nibabel.cmdline.conform:main" +nib-convert = "nibabel.cmdline.convert:main" +nib-ls = "nibabel.cmdline.ls:main" +nib-dicomfs = "nibabel.cmdline.dicomfs:main" +nib-diff = "nibabel.cmdline.diff:main" +nib-stats = "nibabel.cmdline.stats:main" +nib-nifti-dx = "nibabel.cmdline.nifti_dx:main" +nib-tck2trk = "nibabel.cmdline.tck2trk:main" +nib-trk2tck = "nibabel.cmdline.trk2tck:main" +nib-roi = "nibabel.cmdline.roi:main" +parrec2nii = "nibabel.cmdline.parrec2nii:main" + +[tool.setuptools] +platforms = ["OS Independent"] +provides = ["nibabel", "nisext"] +zip-safe = false + +[tool.setuptools.packages] +find = {} + +[tool.setuptools.package-data] +nibabel = ["tests/data/*", "*/tests/data/*", "benchmarks/pytest.benchmark.ini"] diff --git a/setup.cfg b/setup.cfg index 47a7317088..03e6e41171 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,41 +1,3 @@ -[metadata] -name = nibabel -url = https://nipy.org/nibabel -download_url = https://github.com/nipy/nibabel -author = nibabel developers -author_email = neuroimaging@python.org -maintainer = Chris Markiewicz -maintainer_email = neuroimaging@python.org -classifiers = - Development Status :: 4 - Beta - Environment :: Console - Intended Audience :: Science/Research - License :: OSI Approved :: MIT License - Operating System :: OS Independent - Programming Language :: Python - Programming Language :: Python :: 3.7 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - Programming Language :: Python :: 3.10 - Topic :: Scientific/Engineering -license = MIT License -description = Access a multitude of neuroimaging data formats -long_description = file:README.rst -long_description_content_type = text/x-rst; charset=UTF-8 -platforms = OS Independent -provides = - nibabel - nisext - -[options] 
-python_requires = >=3.7 -install_requires = - numpy >=1.17 - packaging >=17.0 - setuptools -zip_safe = False -packages = find: - [options.extras_require] dicom = pydicom >=1.0.0 @@ -74,26 +36,6 @@ all = %(test)s %(zstd)s -[options.entry_points] -console_scripts = - nib-conform=nibabel.cmdline.conform:main - nib-convert=nibabel.cmdline.convert:main - nib-ls=nibabel.cmdline.ls:main - nib-dicomfs=nibabel.cmdline.dicomfs:main - nib-diff=nibabel.cmdline.diff:main - nib-stats=nibabel.cmdline.stats:main - nib-nifti-dx=nibabel.cmdline.nifti_dx:main - nib-tck2trk=nibabel.cmdline.tck2trk:main - nib-trk2tck=nibabel.cmdline.trk2tck:main - nib-roi=nibabel.cmdline.roi:main - parrec2nii=nibabel.cmdline.parrec2nii:main - -[options.package_data] -nibabel = - tests/data/* - */tests/data/* - benchmarks/pytest.benchmark.ini - [flake8] max-line-length = 100 ignore = D100,D101,D102,D103,D104,D105,D200,D201,D202,D204,D205,D208,D209,D210,D300,D301,D400,D401,D403,E24,E121,E123,E126,E226,E266,E402,E704,E731,F821,I100,I101,I201,N802,N803,N804,N806,W503,W504,W605 diff --git a/setup.py b/setup.py index 29fb3642da..4b9bde35b0 100755 --- a/setup.py +++ b/setup.py @@ -7,22 +7,27 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Build helper.""" +""" +Setuptools entrypoint -import sys +This file is a basic stub needed to integrate with versioneer, which allows the +version to be retrieved from git and set statically in a built package. + +This file should not be run directly. To install, use: + + pip install . + +To build a package for distribution, use: + + pip install --upgrade build + python -m build + +""" from setuptools import setup import versioneer -# Give setuptools a hint to complain if it's too old a version -# 30.3.0 allows us to put most metadata in setup.cfg -# Should match pyproject.toml -SETUP_REQUIRES = ['setuptools >= 30.3.0'] -# This enables setuptools to install wheel on-the-fly -SETUP_REQUIRES += ['wheel'] if 'bdist_wheel' in sys.argv else [] - -if __name__ == "__main__": - setup(name='nibabel', - setup_requires=SETUP_REQUIRES, - version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass()) +setup( + version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass(), +) From 58c86855d4c050d35bbcd3eb6114b6ff5c5be695 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 4 Sep 2022 10:59:54 -0400 Subject: [PATCH 042/702] MNT: Mark as stable --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 39329568bf..01d06cdfbc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ license = { text="MIT License" } requires-python = ">=3.7" dependencies = ["numpy >=1.17", "packaging >=17", "setuptools"] classifiers = [ - "Development Status :: 4 - Beta", + "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", From 7ee63272b2e346e4d0c3c1f790c64ba737ddc31f Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 4 Sep 2022 15:15:49 -0400 Subject: [PATCH 043/702] MNT: Build requirements files from pyproject.toml --- min-requirements.txt | 2 +- requirements.txt | 2 +- tools/update_requirements.py | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/min-requirements.txt b/min-requirements.txt index ac8baf97f6..8308f6e076 100644 --- a/min-requirements.txt +++ b/min-requirements.txt @@ -1,4 +1,4 @@ # Auto-generated by tools/update_requirements.py numpy ==1.17 -packaging ==17.0 +packaging ==17 setuptools diff --git a/requirements.txt b/requirements.txt index 5466c4e508..2c77ae1e0d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ # Auto-generated by tools/update_requirements.py numpy >=1.17 -packaging >=17.0 +packaging >=17 setuptools diff --git a/tools/update_requirements.py b/tools/update_requirements.py index 551424994c..b167438c6f 100755 --- a/tools/update_requirements.py +++ b/tools/update_requirements.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 import sys -from configparser import ConfigParser +import tomli from pathlib import Path if sys.version_info < (3, 6): @@ -8,13 +8,13 @@ sys.exit(1) repo_root = Path(__file__).parent.parent -setup_cfg = repo_root / "setup.cfg" +pyproject_toml = repo_root / "pyproject.toml" reqs = repo_root / "requirements.txt" min_reqs = repo_root / "min-requirements.txt" -config = ConfigParser() -config.read(setup_cfg) -requirements = config.get("options", "install_requires").strip().splitlines() +with open(pyproject_toml, 'rb') as fobj: + config = tomli.load(fobj) +requirements = config["project"]["dependencies"] script_name = Path(__file__).relative_to(repo_root) From cba4607579acea6f7f2d7ab8e8d1507d56ab2344 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 7 Sep 2022 08:41:35 -0400 Subject: [PATCH 044/702] Apply suggestions from code review Co-authored-by: Matthew Brett --- nibabel/arrayproxy.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 4b8287194e..dc9b171c0b 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -119,7 +119,8 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non order : {None, 'F', 'C'}, optional, keyword only `order` controls the order of the data array layout. Fortran-style, column-major order may be indicated with 'F', and C-style, row-major - order may be indicated with 'C'. The default order is 'F'. + order may be indicated with 'C'. None gives the default order, that + comes from the `_default_order` class variable. 
keep_file_open : { None, True, False }, optional, keyword only `keep_file_open` controls whether a new file handle is created every time the image is accessed, or a single file handle is @@ -133,7 +134,7 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") if order not in (None, 'C', 'F'): - raise ValueError("order should be one of {'C', 'F'}") + raise ValueError("order should be one of {None, 'C', 'F'}") self.file_like = file_like if hasattr(spec, 'get_data_shape'): slope, inter = spec.get_slope_inter() From cff42d6cd971803fc774ec69db3331a45c731496 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 7 Sep 2022 08:46:14 -0400 Subject: [PATCH 045/702] Update nibabel/tests/test_arrayproxy.py --- nibabel/tests/test_arrayproxy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index fdf807654a..ed89105aa0 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -64,6 +64,7 @@ class CArrayProxy(ArrayProxy): class DeprecatedCArrayProxy(ArrayProxy): + # Used in test_deprecated_order_classvar. Remove when that test is removed (8.0) order = 'C' From 3a5c1d0a06a28b148ddfee8c0b440553c8b71d58 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 13 Sep 2022 09:35:28 -0400 Subject: [PATCH 046/702] TEST: Validate issue gh-1137 --- nibabel/tests/test_loadsave.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index ad7c34cbcf..721c8a414b 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -13,6 +13,7 @@ from ..loadsave import load, read_img_data, _signature_matches_extension from ..filebasedimages import ImageFileError from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory +from ..openers import Opener from ..optpkg import optional_package _, have_scipy, _ = optional_package('scipy') @@ -82,6 +83,15 @@ def test_load_bad_compressed_extension(tmp_path, extension): load(file_path) +@pytest.mark.parametrize("extension", [".gz", ".bz2"]) +def test_load_good_extension_with_bad_data(tmp_path, extension): + file_path = tmp_path / f"img.nii{extension}" + with Opener(file_path, "wb") as fobj: + fobj.write(b"bad") + with pytest.raises(ImageFileError, match="Cannot work out file type of .*"): + load(file_path) + + def test_signature_matches_extension(tmp_path): gz_signature = b"\x1f\x8b" good_file = tmp_path / "good.gz" From c0ab75580542106fbee7a2ece664e00c36d3c923 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 13 Sep 2022 09:39:53 -0400 Subject: [PATCH 047/702] FIX: Do not attempt to pass sniffed bytes to _signature_matches_extension --- nibabel/loadsave.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 04fee7b6a2..a75e0e664b 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -22,7 +22,7 @@ _compressed_suffixes = ('.gz', '.bz2', '.zst') -def _signature_matches_extension(filename, sniff): +def _signature_matches_extension(filename): """Check if signature aka magic number matches filename extension. Parameters @@ -30,10 +30,6 @@ def _signature_matches_extension(filename, sniff): filename : str or os.PathLike Path to the file to check - sniff : bytes or None - First bytes of the file. 
If not `None` and long enough to contain the - signature, avoids having to read the start of the file. - Returns ------- matches : bool @@ -56,12 +52,11 @@ def _signature_matches_extension(filename, sniff): if ext not in signatures: return True, "" expected_signature = signatures[ext]["signature"] - if sniff is None or len(sniff) < len(expected_signature): - try: - with open(filename, "rb") as fh: - sniff = fh.read(len(expected_signature)) - except OSError: - return False, f"Could not read file: {filename}" + try: + with open(filename, "rb") as fh: + sniff = fh.read(len(expected_signature)) + except OSError: + return False, f"Could not read file: {filename}" if sniff.startswith(expected_signature): return True, "" format_name = signatures[ext]["format_name"] @@ -100,7 +95,7 @@ def load(filename, **kwargs): img = image_klass.from_filename(filename, **kwargs) return img - matches, msg = _signature_matches_extension(filename, sniff) + matches, msg = _signature_matches_extension(filename) if not matches: raise ImageFileError(msg) From 8bad78fa979c493a443877a9c91266b4fca7461e Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 13 Sep 2022 09:40:09 -0400 Subject: [PATCH 048/702] TEST: Update/remove _signature_matches_extension tests --- nibabel/tests/test_loadsave.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 721c8a414b..21717df0e6 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -98,27 +98,19 @@ def test_signature_matches_extension(tmp_path): good_file.write_bytes(gz_signature) bad_file = tmp_path / "bad.gz" bad_file.write_bytes(b"bad") - matches, msg = _signature_matches_extension( - tmp_path / "uncompressed.nii", None) + matches, msg = _signature_matches_extension(tmp_path / "uncompressed.nii") assert matches assert msg == "" - matches, msg = _signature_matches_extension(tmp_path / "missing.gz", None) + matches, msg = _signature_matches_extension(tmp_path / "missing.gz") assert not matches assert msg.startswith("Could not read") - matches, msg = _signature_matches_extension(bad_file, None) + matches, msg = _signature_matches_extension(bad_file) assert not matches assert "is not a" in msg - matches, msg = _signature_matches_extension(bad_file, gz_signature + b"abc") + matches, msg = _signature_matches_extension(good_file) assert matches assert msg == "" - matches, msg = _signature_matches_extension( - good_file, gz_signature + b"abc") - assert matches - assert msg == "" - matches, msg = _signature_matches_extension(good_file, gz_signature[:1]) - assert matches - assert msg == "" - matches, msg = _signature_matches_extension(good_file, None) + matches, msg = _signature_matches_extension(tmp_path / "missing.nii") assert matches assert msg == "" From fcd489ba770e539fcf18e1878038eb031a6c262a Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 13 Sep 2022 09:58:22 -0400 Subject: [PATCH 049/702] ENH: Throw in .zst while here --- nibabel/filebasedimages.py | 2 +- nibabel/filename_parser.py | 2 +- nibabel/loadsave.py | 3 ++- nibabel/tests/test_loadsave.py | 9 +++++++-- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index b08f5e74d4..4a194576b3 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -430,7 +430,7 @@ def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): try: with ImageOpener(meta_fname, 'rb') as fobj: binaryblock = fobj.read(sniff_nbytes) - except IOError: + except (IOError, EOFError): return None return (binaryblock, meta_fname) diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index d8ed87c38a..e254019883 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -253,7 +253,7 @@ def _iendswith(whole, end): def splitext_addext(filename, - addexts=('.gz', '.bz2'), + addexts=('.gz', '.bz2', '.zst'), match_case=False): """ Split ``/pth/fname.ext.gz`` into ``/pth/fname, .ext, .gz`` diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index a75e0e664b..ff176f541d 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -44,7 +44,8 @@ def _signature_matches_extension(filename): """ signatures = { ".gz": {"signature": b"\x1f\x8b", "format_name": "gzip"}, - ".bz2": {"signature": b"BZh", "format_name": "bzip2"} + ".bz2": {"signature": b"BZh", "format_name": "bzip2"}, + ".zst": {"signature": b"\x28\xb5\x2f\xfd", "format_name": "ztsd"}, } filename = _stringify_path(filename) *_, ext = splitext_addext(filename) diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 21717df0e6..f2cf0242d5 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -17,6 +17,7 @@ from ..optpkg import optional_package _, have_scipy, _ = optional_package('scipy') +_, have_pyzstd, _ = optional_package('pyzstd') from numpy.testing import (assert_almost_equal, assert_array_equal) @@ -75,16 +76,20 @@ def test_load_empty_image(): assert str(err.value).startswith('Empty file: ') -@pytest.mark.parametrize("extension", [".gz", ".bz2"]) +@pytest.mark.parametrize("extension", [".gz", ".bz2", ".zst"]) def test_load_bad_compressed_extension(tmp_path, extension): + if extension == ".zst" and not have_pyzstd: + pytest.skip() file_path = tmp_path / f"img.nii{extension}" file_path.write_bytes(b"bad") with pytest.raises(ImageFileError, match=".*is not a .* file"): load(file_path) -@pytest.mark.parametrize("extension", [".gz", ".bz2"]) +@pytest.mark.parametrize("extension", [".gz", ".bz2", ".zst"]) def test_load_good_extension_with_bad_data(tmp_path, extension): + if extension == ".zst" and not have_pyzstd: + pytest.skip() file_path = tmp_path / f"img.nii{extension}" with Opener(file_path, "wb") as fobj: fobj.write(b"bad") From 48d0f73629b914d69a9ac601faad64df176c4f30 Mon Sep 17 00:00:00 2001 From: Anibal Date: Wed, 28 Sep 2022 12:49:21 -0500 Subject: [PATCH 050/702] fix: handle invalid tck files --- nibabel/streamlines/tck.py | 59 +++++++++++++++++-------- nibabel/streamlines/tests/test_tck.py | 18 ++++++++ nibabel/tests/data/no-header-end.tck | Bin 0 -> 81 bytes nibabel/tests/data/no-magic-number.tck | Bin 0 -> 71 bytes 4 files changed, 59 insertions(+), 18 deletions(-) create mode 100644 nibabel/tests/data/no-header-end.tck create mode 100644 nibabel/tests/data/no-magic-number.tck diff --git a/nibabel/streamlines/tck.py 
b/nibabel/streamlines/tck.py index ff12bc2322..79bbca0f43 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -91,8 +91,8 @@ def is_correct_format(cls, fileobj): otherwise returns False. """ with Opener(fileobj) as f: - magic_number = asstr(f.fobj.readline()) - f.seek(-len(magic_number), os.SEEK_CUR) + magic_number = asstr(f.fobj.read(len(cls.MAGIC_NUMBER))) + f.seek(-len(cls.MAGIC_NUMBER), os.SEEK_CUR) return magic_number.strip() == cls.MAGIC_NUMBER @@ -287,8 +287,8 @@ def _write_header(fileobj, header): fileobj.write(asbytes(str(new_offset) + "\n")) fileobj.write(asbytes("END\n")) - @staticmethod - def _read_header(fileobj): + @classmethod + def _read_header(cls, fileobj): """ Reads a TCK header from a file. Parameters @@ -304,23 +304,50 @@ def _read_header(fileobj): header : dict Metadata associated with this tractogram file. """ - # Record start position if this is a file-like object - start_position = fileobj.tell() if hasattr(fileobj, 'tell') else None + + # Build header dictionary from the buffer + hdr = {} + offset_data = 0 with Opener(fileobj) as f: + + # Record start position + start_position = f.fobj.tell() + + # Make sure we are at the beginning of the file + f.fobj.seek(0, os.SEEK_SET) + # Read magic number - magic_number = f.fobj.readline().strip() + magic_number = asstr(f.fobj.read(len(cls.MAGIC_NUMBER))) + + if magic_number != cls.MAGIC_NUMBER: + raise HeaderError(f"Invalid magic number: {magic_number}") - # Read all key-value pairs contained in the header. - buf = asstr(f.fobj.readline()) - while not buf.rstrip().endswith("END"): - buf += asstr(f.fobj.readline()) + hdr[Field.MAGIC_NUMBER] = magic_number + + f.fobj.seek(1, os.SEEK_CUR) # Skip \n + + # Read all key-value pairs contained in the header + for n_line, line in enumerate(f.fobj, 1): + line = asstr(line).strip() + + if not line: # Skip empty lines + continue + + if line == "END": # End of the header + break + + if ':' not in line: # Invalid header line + raise HeaderError(f"Invalid header (line {n_line}): {line}") + + key, value = line.split(":", 1) + hdr[key.strip()] = value.strip() offset_data = f.tell() - # Build header dictionary from the buffer. - hdr = dict(item.split(': ') for item in buf.rstrip().split('\n')[:-1]) - hdr[Field.MAGIC_NUMBER] = magic_number + # Set the file position where it was, in case it was previously open + if start_position is not None: + f.fobj.seek(start_position, os.SEEK_SET) # Check integrity of TCK header. if 'datatype' not in hdr: @@ -352,10 +379,6 @@ def _read_header(fileobj): # Keep the file position where the data begin. hdr['_offset_data'] = int(hdr['file'].split()[1]) - # Set the file position where it was, if it was previously open. 
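For reference, a minimal standalone sketch of the key-value loop this patch introduces (the missing-`END` handling is tightened further two commits later); the helper name `parse_tck_header` and the plain list-of-strings input are illustrative assumptions, not part of the patch:

    def parse_tck_header(lines):
        # Mirror of the loop above: skip blanks, stop at END, and require
        # every remaining line to be a "key: value" pair.
        hdr = {}
        for n_line, line in enumerate(lines, 1):
            line = line.strip()
            if not line:  # skip empty lines
                continue
            if line == 'END':  # end of the header
                return hdr
            if ':' not in line:  # invalid header line
                raise ValueError(f'Invalid header (line {n_line}): {line}')
            key, value = line.split(':', 1)
            hdr[key.strip()] = value.strip()
        raise ValueError('Missing END in the header.')

    # e.g. parse_tck_header(['count: 1', 'datatype: Float32LE', 'END'])
    # returns {'count': '1', 'datatype': 'Float32LE'}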
- if start_position is not None: - fileobj.seek(start_position, os.SEEK_SET) - return hdr @classmethod diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index e2c6cf119a..d44e436b99 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -25,6 +25,8 @@ def setup_module(): global DATA DATA['empty_tck_fname'] = pjoin(data_path, "empty.tck") + DATA['no-magic-number_tck_fname'] = pjoin(data_path, "no-magic-number.tck") + DATA['no-header-end_tck_fname'] = pjoin(data_path, "no-header-end.tck") # simple.tck contains only streamlines DATA['simple_tck_fname'] = pjoin(data_path, "simple.tck") DATA['simple_tck_big_endian_fname'] = pjoin(data_path, @@ -50,6 +52,22 @@ def test_load_empty_file(self): with pytest.warns(Warning) if lazy_load else error_warnings(): assert_tractogram_equal(tck.tractogram, DATA['empty_tractogram']) + def test_load_no_magic_number_file(self): + for lazy_load in [False, True]: + with pytest.raises(HeaderError): + TckFile.load( + DATA['no-magic-number_tck_fname'], + lazy_load=lazy_load + ) + + def test_load_no_header_end_file(self): + for lazy_load in [False, True]: + with pytest.raises(HeaderError): + TckFile.load( + DATA['no-header-end_tck_fname'], + lazy_load=lazy_load + ) + def test_load_simple_file(self): for lazy_load in [False, True]: tck = TckFile.load(DATA['simple_tck_fname'], lazy_load=lazy_load) diff --git a/nibabel/tests/data/no-header-end.tck b/nibabel/tests/data/no-header-end.tck new file mode 100644 index 0000000000000000000000000000000000000000..2304f4192113e9274778881878901f911d92ca95 GIT binary patch literal 81 zcmc~TDk;jWP$(%%OwKOmO3p9ME3r~AfCH|S#FE64%7RoY1-G31#1dm8A6Krl%p4$J RPr=Nbi-F<&{=N0cBmh3s8}9%B literal 0 HcmV?d00001 diff --git a/nibabel/tests/data/no-magic-number.tck b/nibabel/tests/data/no-magic-number.tck new file mode 100644 index 0000000000000000000000000000000000000000..3a4fe7de72416642fca5280ef02cb50eaafc7495 GIT binary patch literal 71 zcmYe!FU>2lQZRr6u9U=*#FEN_R4WCyoczQRVB&xMPD;r;%- H^~fXu_k$OB literal 0 HcmV?d00001 From bd57006b68c68cf232e5f731d16cb0608bd89ddf Mon Sep 17 00:00:00 2001 From: Anibal Date: Wed, 28 Sep 2022 13:05:06 -0500 Subject: [PATCH 051/702] fix: one more test case, reaching EOF --- nibabel/streamlines/tck.py | 8 +++++++- nibabel/streamlines/tests/test_tck.py | 17 +++++++++++++---- .../{no-header-end.tck => no_header_end.tck} | Bin nibabel/tests/data/no_header_end_eof.tck | 4 ++++ ...no-magic-number.tck => no_magic_number.tck} | Bin 5 files changed, 24 insertions(+), 5 deletions(-) rename nibabel/tests/data/{no-header-end.tck => no_header_end.tck} (100%) create mode 100644 nibabel/tests/data/no_header_end_eof.tck rename nibabel/tests/data/{no-magic-number.tck => no_magic_number.tck} (100%) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 79bbca0f43..b9a45566d0 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -327,7 +327,9 @@ def _read_header(cls, fileobj): f.fobj.seek(1, os.SEEK_CUR) # Skip \n - # Read all key-value pairs contained in the header + found_end = False + + # Read all key-value pairs contained in the header, stop at EOF for n_line, line in enumerate(f.fobj, 1): line = asstr(line).strip() @@ -335,6 +337,7 @@ def _read_header(cls, fileobj): continue if line == "END": # End of the header + found_end = True break if ':' not in line: # Invalid header line @@ -343,6 +346,9 @@ def _read_header(cls, fileobj): key, value = line.split(":", 1) hdr[key.strip()] = value.strip() + if not 
found_end: + raise HeaderError("Missing END in the header.") + offset_data = f.tell() # Set the file position where it was, in case it was previously open diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index d44e436b99..1cdda4b44e 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -25,8 +25,9 @@ def setup_module(): global DATA DATA['empty_tck_fname'] = pjoin(data_path, "empty.tck") - DATA['no-magic-number_tck_fname'] = pjoin(data_path, "no-magic-number.tck") - DATA['no-header-end_tck_fname'] = pjoin(data_path, "no-header-end.tck") + DATA['no_magic_number_tck_fname'] = pjoin(data_path, "no_magic_number.tck") + DATA['no_header_end_tck_fname'] = pjoin(data_path, "no_header_end.tck") + DATA['no_header_end_eof_tck_fname'] = pjoin(data_path, "no_header_end_eof.tck") # simple.tck contains only streamlines DATA['simple_tck_fname'] = pjoin(data_path, "simple.tck") DATA['simple_tck_big_endian_fname'] = pjoin(data_path, @@ -56,7 +57,7 @@ def test_load_no_magic_number_file(self): for lazy_load in [False, True]: with pytest.raises(HeaderError): TckFile.load( - DATA['no-magic-number_tck_fname'], + DATA['no_magic_number_tck_fname'], lazy_load=lazy_load ) @@ -64,7 +65,15 @@ def test_load_no_header_end_file(self): for lazy_load in [False, True]: with pytest.raises(HeaderError): TckFile.load( - DATA['no-header-end_tck_fname'], + DATA['no_header_end_tck_fname'], + lazy_load=lazy_load + ) + + def test_load_no_header_end_eof_file(self): + for lazy_load in [False, True]: + with pytest.raises(HeaderError): + TckFile.load( + DATA['no_header_end_eof_tck_fname'], lazy_load=lazy_load ) diff --git a/nibabel/tests/data/no-header-end.tck b/nibabel/tests/data/no_header_end.tck similarity index 100% rename from nibabel/tests/data/no-header-end.tck rename to nibabel/tests/data/no_header_end.tck diff --git a/nibabel/tests/data/no_header_end_eof.tck b/nibabel/tests/data/no_header_end_eof.tck new file mode 100644 index 0000000000..ceb79ebfb7 --- /dev/null +++ b/nibabel/tests/data/no_header_end_eof.tck @@ -0,0 +1,4 @@ +mrtrix tracks +count: 0000000000 +datatype: Float32LE +file: . 
67 \ No newline at end of file diff --git a/nibabel/tests/data/no-magic-number.tck b/nibabel/tests/data/no_magic_number.tck similarity index 100% rename from nibabel/tests/data/no-magic-number.tck rename to nibabel/tests/data/no_magic_number.tck From 6bc5bf9e245857f1f2fdbd2a2a2ed448acd0a3bf Mon Sep 17 00:00:00 2001 From: Anibal Date: Wed, 28 Sep 2022 13:18:14 -0500 Subject: [PATCH 052/702] fix: linting --- nibabel/streamlines/tck.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index b9a45566d0..e39e368315 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -325,7 +325,7 @@ def _read_header(cls, fileobj): hdr[Field.MAGIC_NUMBER] = magic_number - f.fobj.seek(1, os.SEEK_CUR) # Skip \n + f.fobj.seek(1, os.SEEK_CUR) # Skip \n found_end = False @@ -333,14 +333,14 @@ def _read_header(cls, fileobj): for n_line, line in enumerate(f.fobj, 1): line = asstr(line).strip() - if not line: # Skip empty lines + if not line: # Skip empty lines continue - if line == "END": # End of the header + if line == "END": # End of the header found_end = True break - if ':' not in line: # Invalid header line + if ':' not in line: # Invalid header line raise HeaderError(f"Invalid header (line {n_line}): {line}") key, value = line.split(":", 1) From a4c420df71c9fc912204cac69406d7cc8ca8910e Mon Sep 17 00:00:00 2001 From: Anibal Solon Date: Fri, 30 Sep 2022 12:31:35 -0500 Subject: [PATCH 053/702] fix: use interface from Opener --- nibabel/streamlines/tck.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index e39e368315..823a88b5cf 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -91,10 +91,10 @@ def is_correct_format(cls, fileobj): otherwise returns False. """ with Opener(fileobj) as f: - magic_number = asstr(f.fobj.read(len(cls.MAGIC_NUMBER))) + magic_number = f.read(len(cls.MAGIC_NUMBER)) f.seek(-len(cls.MAGIC_NUMBER), os.SEEK_CUR) - return magic_number.strip() == cls.MAGIC_NUMBER + return asstr(magic_number) == cls.MAGIC_NUMBER @classmethod def create_empty_header(cls): @@ -312,25 +312,25 @@ def _read_header(cls, fileobj): with Opener(fileobj) as f: # Record start position - start_position = f.fobj.tell() + start_position = f.tell() # Make sure we are at the beginning of the file - f.fobj.seek(0, os.SEEK_SET) + f.seek(0, os.SEEK_SET) # Read magic number - magic_number = asstr(f.fobj.read(len(cls.MAGIC_NUMBER))) + magic_number = f.read(len(cls.MAGIC_NUMBER)) - if magic_number != cls.MAGIC_NUMBER: + if asstr(magic_number) != cls.MAGIC_NUMBER: raise HeaderError(f"Invalid magic number: {magic_number}") hdr[Field.MAGIC_NUMBER] = magic_number - f.fobj.seek(1, os.SEEK_CUR) # Skip \n + f.seek(1, os.SEEK_CUR) # Skip \n found_end = False # Read all key-value pairs contained in the header, stop at EOF - for n_line, line in enumerate(f.fobj, 1): + for n_line, line in enumerate(f, 1): line = asstr(line).strip() if not line: # Skip empty lines @@ -353,7 +353,7 @@ def _read_header(cls, fileobj): # Set the file position where it was, in case it was previously open if start_position is not None: - f.fobj.seek(start_position, os.SEEK_SET) + f.seek(start_position, os.SEEK_SET) # Check integrity of TCK header. 
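The switch from `f.fobj.read` to `f.read` above works because `nibabel.openers.Opener` forwards `read`, `seek` and `tell` to whatever file-like it wraps, so the same code handles a filename, an already-open file object, or a compressed stream. A short usage sketch, assuming an on-disk TCK file at the illustrative path `example.tck`:

    import os
    from nibabel.openers import Opener

    MAGIC = b'mrtrix tracks'

    # Opener accepts a path or an open file-like object and delegates
    # read/seek/tell to the wrapped file.
    with Opener('example.tck', 'rb') as f:  # path is illustrative
        magic = f.read(len(MAGIC))          # fixed-length magic number
        f.seek(-len(MAGIC), os.SEEK_CUR)    # rewind so parsing can restart
        assert f.tell() == 0
        assert magic == MAGIC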
if 'datatype' not in hdr: From a85f4a07684429750eeaf50f4a5c7b29737d7974 Mon Sep 17 00:00:00 2001 From: Anibal Solon Date: Fri, 30 Sep 2022 16:41:25 -0500 Subject: [PATCH 054/702] enh: assume file mode is binary --- nibabel/streamlines/tck.py | 42 ++++++++++++--------------- nibabel/streamlines/tests/test_tck.py | 28 ++++++++++++++++++ 2 files changed, 46 insertions(+), 24 deletions(-) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 823a88b5cf..36f76e617c 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -8,7 +8,6 @@ import warnings import numpy as np -from numpy.compat.py3k import asbytes, asstr from nibabel.openers import Opener @@ -44,7 +43,7 @@ class TckFile(TractogramFile): .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ # Constants - MAGIC_NUMBER = "mrtrix tracks" + MAGIC_NUMBER = b"mrtrix tracks" SUPPORTS_DATA_PER_POINT = False # Not yet SUPPORTS_DATA_PER_STREAMLINE = False # Not yet @@ -94,7 +93,7 @@ def is_correct_format(cls, fileobj): magic_number = f.read(len(cls.MAGIC_NUMBER)) f.seek(-len(cls.MAGIC_NUMBER), os.SEEK_CUR) - return asstr(magic_number) == cls.MAGIC_NUMBER + return magic_number == cls.MAGIC_NUMBER @classmethod def create_empty_header(cls): @@ -230,7 +229,7 @@ def save(self, fileobj): header[Field.NB_STREAMLINES] = nb_streamlines # Add the EOF_DELIMITER. - f.write(asbytes(self.EOF_DELIMITER.tobytes())) + f.write(self.EOF_DELIMITER.tobytes()) self._finalize_header(f, header, offset=beginning) @staticmethod @@ -251,13 +250,11 @@ def _write_header(fileobj, header): "count", "datatype", "file"] # Fields being replaced. lines = [] - lines.append(asstr(header[Field.MAGIC_NUMBER])) lines.append(f"count: {header[Field.NB_STREAMLINES]:010}") lines.append("datatype: Float32LE") # Always Float32LE. lines.extend([f"{k}: {v}" for k, v in header.items() if k not in exclude and not k.startswith("_")]) - lines.append("file: . ") # Manually add this last field. out = "\n".join(lines) # Check the header is well formatted. @@ -265,27 +262,24 @@ def _write_header(fileobj, header): msg = f"Key-value pairs cannot contain '\\n':\n{out}" raise HeaderError(msg) - if out.count(":") > len(lines) - 1: + if out.count(":") > len(lines): # : only one per line (except the last one which contains END). msg = f"Key-value pairs cannot contain ':':\n{out}" raise HeaderError(msg) + out = header[Field.MAGIC_NUMBER] + b"\n" + out.encode('utf-8') + + # Compute data offset considering the offset string representation + hdr_offset = len(out) + 8 + 3 + 3 # "file" header, END, and \n's + offset_repr = f'{hdr_offset}' + + # Adding the offset increases one char to the offset repr + hdr_offset += len(f'{hdr_offset + len(offset_repr)}') + # Write header to file. - fileobj.write(asbytes(out)) - - hdr_len_no_offset = len(out) + 5 - # Need to add number of bytes to store offset as decimal string. We - # start with estimate without string, then update if the - # offset-as-decimal-string got longer after adding length of the - # offset string. - new_offset = -1 - old_offset = hdr_len_no_offset - while new_offset != old_offset: - old_offset = new_offset - new_offset = hdr_len_no_offset + len(str(old_offset)) - - fileobj.write(asbytes(str(new_offset) + "\n")) - fileobj.write(asbytes("END\n")) + fileobj.write(out) + fileobj.write(b"\nfile: . 
" + f'{hdr_offset}'.encode('utf-8') + b"\n") + fileobj.write(b"END\n") @classmethod def _read_header(cls, fileobj): @@ -320,7 +314,7 @@ def _read_header(cls, fileobj): # Read magic number magic_number = f.read(len(cls.MAGIC_NUMBER)) - if asstr(magic_number) != cls.MAGIC_NUMBER: + if magic_number != cls.MAGIC_NUMBER: raise HeaderError(f"Invalid magic number: {magic_number}") hdr[Field.MAGIC_NUMBER] = magic_number @@ -331,7 +325,7 @@ def _read_header(cls, fileobj): # Read all key-value pairs contained in the header, stop at EOF for n_line, line in enumerate(f, 1): - line = asstr(line).strip() + line = line.decode('utf-8').strip() if not line: # Skip empty lines continue diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index 1cdda4b44e..75786c87c6 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -216,6 +216,34 @@ def test_write_simple_file(self): with pytest.raises(HeaderError): tck.save(tck_file) + def test_write_bigheader_file(self): + tractogram = Tractogram(DATA['streamlines'], + affine_to_rasmm=np.eye(4)) + + # Offset is represented by 2 characters. + tck_file = BytesIO() + tck = TckFile(tractogram) + tck.header['new_entry'] = ' ' * 20 + tck.save(tck_file) + tck_file.seek(0, os.SEEK_SET) + + new_tck = TckFile.load(tck_file) + assert_tractogram_equal(new_tck.tractogram, tractogram) + assert new_tck.header['_offset_data'] == 99 + + # We made the jump, now offset is represented by 3 characters + # and we need to adjust the offset! + tck_file = BytesIO() + tck = TckFile(tractogram) + tck.header['new_entry'] = ' ' * 21 + tck.save(tck_file) + tck_file.seek(0, os.SEEK_SET) + + new_tck = TckFile.load(tck_file) + assert_tractogram_equal(new_tck.tractogram, tractogram) + assert new_tck.header['_offset_data'] == 101 + + def test_load_write_file(self): for fname in [DATA['empty_tck_fname'], DATA['simple_tck_fname']]: From 5a714938839d36f9b82b3d707c22144219aaf2fa Mon Sep 17 00:00:00 2001 From: Anibal Solon Date: Wed, 5 Oct 2022 11:20:26 -0500 Subject: [PATCH 055/702] enh: improve comments --- nibabel/streamlines/tck.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 36f76e617c..87a59a520a 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -270,10 +270,11 @@ def _write_header(fileobj, header): out = header[Field.MAGIC_NUMBER] + b"\n" + out.encode('utf-8') # Compute data offset considering the offset string representation - hdr_offset = len(out) + 8 + 3 + 3 # "file" header, END, and \n's + # headers + "file" header + END + \n's + hdr_offset = len(out) + 8 + 3 + 3 offset_repr = f'{hdr_offset}' - # Adding the offset increases one char to the offset repr + # Adding the offset may increase one char to the offset repr hdr_offset += len(f'{hdr_offset + len(offset_repr)}') # Write header to file. From 203ed0c8bc139bb7951583518af81a4814801fb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anibal=20S=C3=B3lon?= Date: Fri, 18 Nov 2022 15:46:45 -0600 Subject: [PATCH 056/702] Use f-string to interpolate variable Co-authored-by: Chris Markiewicz --- nibabel/streamlines/tck.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 87a59a520a..c9bba94a6e 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -279,8 +279,7 @@ def _write_header(fileobj, header): # Write header to file. fileobj.write(out) - fileobj.write(b"\nfile: . 
" + f'{hdr_offset}'.encode('utf-8') + b"\n") - fileobj.write(b"END\n") + fileobj.write(f'\nfile: . {hdr_offset}\nEND\n'.encode('utf-8')) @classmethod def _read_header(cls, fileobj): From d95e0a319940de12043ee6c586dbc6002ad6e851 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 7 Nov 2022 09:46:49 -0500 Subject: [PATCH 057/702] DOCTEST: Changed AttributeError message --- nibabel/wrapstruct.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index 92524508ab..af64a762c3 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -411,7 +411,7 @@ def structarr(self): >>> wstr1.structarr = None Traceback (most recent call last): ... - AttributeError: can't set attribute... + AttributeError: ... """ return self._structarr From 9da60e5bd7701bb5d6143f97d920334b63a00994 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 20 Nov 2022 21:40:04 +0100 Subject: [PATCH 058/702] MNT: No need to inherit `object` in Python 3 Classes inherit from `object` implicitly. --- doc/source/devel/biaps/biap_0002.rst | 4 ++-- doc/tools/apigen.py | 4 ++-- nibabel/arraywriters.py | 2 +- nibabel/batteryrunners.py | 4 ++-- nibabel/cmdline/dicomfs.py | 2 +- nibabel/data.py | 4 ++-- nibabel/deprecated.py | 6 +++--- nibabel/deprecator.py | 2 +- nibabel/dft.py | 6 +++--- nibabel/ecat.py | 4 ++-- nibabel/filebasedimages.py | 4 ++-- nibabel/fileholders.py | 2 +- nibabel/fileslice.py | 2 +- nibabel/imageglobals.py | 4 ++-- nibabel/minc1.py | 4 ++-- nibabel/minc2.py | 2 +- nibabel/nicom/dicomwrappers.py | 2 +- nibabel/nicom/structreader.py | 2 +- nibabel/nicom/tests/test_dicomwrappers.py | 12 ++++++------ nibabel/nifti1.py | 2 +- nibabel/onetime.py | 6 +++--- nibabel/openers.py | 2 +- nibabel/parrec.py | 2 +- nibabel/spatialimages.py | 2 +- nibabel/streamlines/array_sequence.py | 4 ++-- nibabel/streamlines/header.py | 2 +- nibabel/streamlines/tractogram.py | 4 ++-- nibabel/tests/scriptrunner.py | 2 +- nibabel/tests/test_analyze.py | 6 +++--- nibabel/tests/test_api_validators.py | 2 +- nibabel/tests/test_arrayproxy.py | 6 +++--- nibabel/tests/test_brikhead.py | 8 ++++---- nibabel/tests/test_deprecated.py | 2 +- nibabel/tests/test_deprecator.py | 4 ++-- nibabel/tests/test_ecat_data.py | 2 +- nibabel/tests/test_image_api.py | 4 ++-- nibabel/tests/test_keywordonly.py | 2 +- nibabel/tests/test_minc1.py | 2 +- nibabel/tests/test_minc2_data.py | 2 +- nibabel/tests/test_nifti1.py | 2 +- nibabel/tests/test_nifti2.py | 2 +- nibabel/tests/test_openers.py | 2 +- nibabel/tests/test_parrec.py | 2 +- nibabel/tests/test_recoder.py | 2 +- nibabel/tests/test_spatialimages.py | 6 +++--- nibabel/tests/test_spm99analyze.py | 4 ++-- nibabel/tmpdirs.py | 4 ++-- nibabel/tripwire.py | 2 +- nibabel/viewers.py | 2 +- nibabel/volumeutils.py | 6 +++--- nibabel/wrapstruct.py | 2 +- nibabel/xmlutils.py | 4 ++-- nisext/sexts.py | 2 +- 53 files changed, 90 insertions(+), 90 deletions(-) diff --git a/doc/source/devel/biaps/biap_0002.rst b/doc/source/devel/biaps/biap_0002.rst index 1dccfce746..89ba4e913a 100644 --- a/doc/source/devel/biaps/biap_0002.rst +++ b/doc/source/devel/biaps/biap_0002.rst @@ -104,8 +104,8 @@ sliced array, as in: .. 
code:: python - class SomeImage(object): - class Slicer(object): + class SomeImage: + class Slicer: def __init__(self, parent): self.parent = parent def __getitem__(self, slicedef): diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py index 52966300e2..cabbd319c5 100644 --- a/doc/tools/apigen.py +++ b/doc/tools/apigen.py @@ -28,7 +28,7 @@ DEBUG = True -class ApiDocWriter(object): +class ApiDocWriter: """ Class for automatic detection and parsing of API docs to Sphinx-parsable reST format""" @@ -118,7 +118,7 @@ def _get_object_name(self, line): >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' - >>> docwriter._get_object_name(" class Klass(object): ") + >>> docwriter._get_object_name(" class Klass: ") 'Klass' >>> docwriter._get_object_name(" class Klass: ") 'Klass' diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 8523d9fedd..cdbec32fc6 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -45,7 +45,7 @@ class ScalingError(WriterError): pass -class ArrayWriter(object): +class ArrayWriter: def __init__(self, array, out_dtype=None, **kwargs): r""" Initialize array writer diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index 882c1814ef..a860ba3778 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -108,7 +108,7 @@ def chk_pixdims(hdr, fix=True): """ -class BatteryRunner(object): +class BatteryRunner: """ Class to run set of checks """ def __init__(self, checks): @@ -174,7 +174,7 @@ def __len__(self): return len(self._checks) -class Report(object): +class Report: def __init__(self, error=Exception, diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index d4c9d8ff1f..9b1b735cca 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -18,7 +18,7 @@ import logging -class dummy_fuse(object): +class dummy_fuse: """Dummy fuse "module" so that nose does not blow during doctests""" Fuse = object diff --git a/nibabel/data.py b/nibabel/data.py index 438cd2aa58..3845912936 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -31,7 +31,7 @@ class BomberError(DataError, AttributeError): pass -class Datasource(object): +class Datasource: """ Simple class to add base path to relative path """ def __init__(self, base_path): @@ -302,7 +302,7 @@ def make_datasource(pkg_def, **kwargs): return VersionedDatasource(pth) -class Bomber(object): +class Bomber: """ Class to raise an informative error when used """ def __init__(self, name, msg): diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 2dd1f11db3..576d18b5ce 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -7,7 +7,7 @@ from .pkg_info import cmp_pkg_version -class ModuleProxy(object): +class ModuleProxy: """ Proxy for module that may not yet have been imported Parameters @@ -39,12 +39,12 @@ def __repr__(self): return f"" -class FutureWarningMixin(object): +class FutureWarningMixin: """ Insert FutureWarning for object creation Examples -------- - >>> class C(object): pass + >>> class C: pass >>> class D(FutureWarningMixin, C): ... 
warn_message = "Please, don't use this class" diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 7dc32bbe58..3ff9516aec 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -83,7 +83,7 @@ def _add_dep_doc(old_doc, dep_doc, setup='', cleanup=''): old_lines[next_line:] + cleanup_lines + ['']) -class Deprecator(object): +class Deprecator: """ Class to make decorator marking function or method as deprecated The decorated function / method will: diff --git a/nibabel/dft.py b/nibabel/dft.py index f87002379e..3de1b31254 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -58,7 +58,7 @@ def __str__(self): return fmt % (self.i + 1, self.si.instance_number) -class _Study(object): +class _Study: def __init__(self, d): self.uid = d['uid'] @@ -91,7 +91,7 @@ def patient_name_or_uid(self): return self.patient_name -class _Series(object): +class _Series: def __init__(self, d): self.uid = d['uid'] @@ -219,7 +219,7 @@ def nifti_size(self): return 352 + 2 * len(self.storage_instances) * self.columns * self.rows -class _StorageInstance(object): +class _StorageInstance: def __init__(self, d): self.uid = d['uid'] diff --git a/nibabel/ecat.py b/nibabel/ecat.py index f87778fc6c..f206e482de 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -511,7 +511,7 @@ def read_subheaders(fileobj, mlist, endianness): return subheaders -class EcatSubHeader(object): +class EcatSubHeader: _subhdrdtype = subhdr_dtype _data_type_codes = data_type_codes @@ -660,7 +660,7 @@ def data_from_fileobj(self, frame=0, orientation=None): return data -class EcatImageArrayProxy(object): +class EcatImageArrayProxy: """ Ecat implementation of array proxy protocol The array proxy allows us to freeze the passed fileobj and diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 4a194576b3..180fe7c6a7 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -22,7 +22,7 @@ class ImageFileError(Exception): pass -class FileBasedHeader(object): +class FileBasedHeader: """ Template class to implement header protocol """ @classmethod @@ -60,7 +60,7 @@ def copy(self): return deepcopy(self) -class FileBasedImage(object): +class FileBasedImage: """ Abstract image class with interface for loading/saving images from disk. diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index c996725991..f7dc9629fd 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -17,7 +17,7 @@ class FileHolderError(Exception): pass -class FileHolder(object): +class FileHolder: """ class to contain filename, fileobj and file position """ diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index d0bd3ca721..cc850132b8 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -16,7 +16,7 @@ SKIP_THRESH = 2 ** 8 -class _NullLock(object): +class _NullLock: """Can be used as no-function dummy object in place of ``threading.lock``. 
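The sweep is behavior-preserving because every class in Python 3 is new-style: writing `class C:` produces exactly the same class as `class C(object):`. A quick self-contained check:

    class WithBase(object):  # old spelling
        pass

    class WithoutBase:       # spelling used throughout this patch
        pass

    # Identical ancestry and method resolution order either way.
    assert WithBase.__mro__[1:] == (object,)
    assert WithoutBase.__mro__[1:] == (object,)
    assert issubclass(WithoutBase, object)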
The ``_NullLock`` is an object which can be used in place of a diff --git a/nibabel/imageglobals.py b/nibabel/imageglobals.py index 91ebaf38ea..4cdeb7b1a3 100644 --- a/nibabel/imageglobals.py +++ b/nibabel/imageglobals.py @@ -31,7 +31,7 @@ logger.addHandler(logging.StreamHandler()) -class ErrorLevel(object): +class ErrorLevel: """ Context manager to set log error level """ @@ -49,7 +49,7 @@ def __exit__(self, exc, value, tb): return False -class LoggingOutputSuppressor(object): +class LoggingOutputSuppressor: """Context manager to prevent global logger from printing""" def __enter__(self): diff --git a/nibabel/minc1.py b/nibabel/minc1.py index c172f40a8e..c0ae95bd7b 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -39,7 +39,7 @@ class MincError(Exception): """ Error when reading MINC files """ -class Minc1File(object): +class Minc1File: """ Class to wrap MINC1 format opened netcdf object Although it has some of the same methods as a ``Header``, we use @@ -235,7 +235,7 @@ def get_scaled_data(self, sliceobj=()): return self._normalize(data, sliceobj) -class MincImageArrayProxy(object): +class MincImageArrayProxy: """ MINC implementation of array proxy protocol The array proxy allows us to freeze the passed fileobj and diff --git a/nibabel/minc2.py b/nibabel/minc2.py index a71ec7c693..3dce425609 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -30,7 +30,7 @@ from .minc1 import Minc1File, MincHeader, Minc1Image, MincError -class Hdf5Bunch(object): +class Hdf5Bunch: """ Make object for accessing attributes of variable """ diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 38e501be5c..85e1655bc7 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -96,7 +96,7 @@ def wrapper_from_data(dcm_data): return SiemensWrapper(dcm_data, csa) -class Wrapper(object): +class Wrapper: """ Class to wrap general DICOM files Methods: diff --git a/nibabel/nicom/structreader.py b/nibabel/nicom/structreader.py index 644f50d345..eb714804f1 100644 --- a/nibabel/nicom/structreader.py +++ b/nibabel/nicom/structreader.py @@ -5,7 +5,7 @@ _ENDIAN_CODES = '@=<>!' -class Unpacker(object): +class Unpacker: """ Class to unpack values from buffer object The buffer object is usually a string. Caches compiled :mod:`struct` diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 11b5b482b9..fcb3cc1703 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -124,7 +124,7 @@ class FakeData(dict): assert dw.get('some_key') is None # Check get defers to dcm_data get - class FakeData2(object): + class FakeData2: def get(self, key, default): return 1 @@ -268,7 +268,7 @@ def test_vol_matching(): # Just to check the interface, make a pretend signature-providing # object. - class C(object): + class C: series_signature = {} assert dw_empty.is_same_series(C()) @@ -386,7 +386,7 @@ def fake_frames(seq_name, field_name, value_seq): each element in list is obj.[0]. 
= value_seq[n] for n in range(N) """ - class Fake(object): + class Fake: pass frames = [] for value in value_seq: @@ -410,16 +410,16 @@ def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None): sid_dim : int the index of the column in 'div_seq' to use as 'sid_seq' """ - class DimIdxSeqElem(object): + class DimIdxSeqElem: def __init__(self, dip=(0, 0), fgp=None): self.DimensionIndexPointer = dip if fgp is not None: self.FunctionalGroupPointer = fgp - class FrmContSeqElem(object): + class FrmContSeqElem: def __init__(self, div, sid): self.DimensionIndexValues = div self.StackID = sid - class PerFrmFuncGrpSeqElem(object): + class PerFrmFuncGrpSeqElem: def __init__(self, div, sid): self.FrameContentSequence = [FrmContSeqElem(div, sid)] # if no StackID values passed in then use the values at index 'sid_dim' in diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 5be146a89c..d434e50c32 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -253,7 +253,7 @@ ), fields=('code', 'label', 'parameters', 'niistring')) -class Nifti1Extension(object): +class Nifti1Extension: """Baseclass for NIfTI1 header extensions. This class is sufficient to handle very simple text-based extensions, such diff --git a/nibabel/onetime.py b/nibabel/onetime.py index 3ca31bea50..6b8debc51b 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- -class ResetMixin(object): +class ResetMixin: """A Mixin class to add a .reset() method to users of OneTimeProperty. By default, auto attributes once computed, become static. If they happen @@ -109,7 +109,7 @@ def reset(self): delattr(self, mname) -class OneTimeProperty(object): +class OneTimeProperty: """A descriptor to make special properties that become normal attributes. This is meant to be used mostly by the auto_attr decorator in this module. @@ -157,7 +157,7 @@ def auto_attr(func): Examples -------- - >>> class MagicProp(object): + >>> class MagicProp: ... @auto_attr ... def a(self): ... 
return 99 diff --git a/nibabel/openers.py b/nibabel/openers.py index b593ef82e7..b50da10c59 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -80,7 +80,7 @@ def _zstd_open(filename, mode="r", *, level_or_option=None, zstd_dict=None): level_or_option=level_or_option, zstd_dict=zstd_dict) -class Opener(object): +class Opener: r""" Class to accept, maybe open, and context-manage file-likes / filenames Provides context manager to close files that the constructor opened for diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 046e1ec704..c2d7160806 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -579,7 +579,7 @@ def exts2pars(exts_source): return headers -class PARRECArrayProxy(object): +class PARRECArrayProxy: def __init__(self, file_like, header, *, mmap=True, scaling='dv'): """ Initialize PARREC array proxy diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 9a2dc76db7..d2e69a0fc5 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -322,7 +322,7 @@ class ImageDataError(Exception): pass -class SpatialFirstSlicer(object): +class SpatialFirstSlicer: """ Slicing interface that returns a new image with an updated affine Checks that an image's first three axes are spatial diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index 2cefc84e47..5d40937b1c 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -24,7 +24,7 @@ def is_ndarray_of_int_or_bool(obj): np.issubdtype(obj.dtype, np.bool_))) -class _BuildCache(object): +class _BuildCache: def __init__(self, arr_seq, common_shape, dtype): self.offsets = list(arr_seq._offsets) self.lengths = list(arr_seq._lengths) @@ -88,7 +88,7 @@ def fn_binary_op(self, value): @_define_operators -class ArraySequence(object): +class ArraySequence: """ Sequence of ndarrays having variable first dimension sizes. This is a container that can store multiple ndarrays where each ndarray diff --git a/nibabel/streamlines/header.py b/nibabel/streamlines/header.py index 9ee7854d30..523035f3ee 100644 --- a/nibabel/streamlines/header.py +++ b/nibabel/streamlines/header.py @@ -2,7 +2,7 @@ """ -class Field(object): +class Field: """ Header fields common to multiple streamline file formats. In IPython, use `nibabel.streamlines.Field??` to list them. diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index 3d48b4b687..5b67af1ab3 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -220,7 +220,7 @@ def __len__(self): return len(self.store) -class TractogramItem(object): +class TractogramItem: """ Class containing information about one streamline. :class:`TractogramItem` objects have three public attributes: `streamline`, @@ -253,7 +253,7 @@ def __len__(self): return len(self.streamline) -class Tractogram(object): +class Tractogram: """ Container for streamlines and their data information. 
Streamlines of a tractogram can be in any coordinate system of your diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index bc7e9977f0..1bffd01929 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -46,7 +46,7 @@ def local_module_dir(module_name): return None -class ScriptRunner(object): +class ScriptRunner: """ Class to run scripts and return output Finds local scripts and local modules if running in the development diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index bbcdfbeefc..e68dcf0685 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -523,7 +523,7 @@ def test_from_header(self): assert hdr == copy assert hdr is not copy - class C(object): + class C: def get_data_dtype(self): return np.dtype('i2') @@ -619,12 +619,12 @@ def test_from_analyze_map(self): klass = self.header_class # Header needs to implement data_dtype, data_shape, zooms - class H1(object): + class H1: pass with pytest.raises(AttributeError): klass.from_header(H1()) - class H2(object): + class H2: def get_data_dtype(self): return np.dtype('u1') diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 76043348c9..668f4351db 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -61,7 +61,7 @@ def obj_params(self): against ``obj``. See the :meth:`validate_something` method for an example. """ - class C(object): + class C: def __init__(self, var): self.var = var diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index ed89105aa0..6857fdb1f7 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -36,7 +36,7 @@ from .test_openers import patch_indexed_gzip -class FunkyHeader(object): +class FunkyHeader: def __init__(self, shape): self.shape = shape @@ -258,7 +258,7 @@ def test_is_proxy(): assert not is_proxy(hdr) assert not is_proxy(np.zeros((2, 3, 4))) - class NP(object): + class NP: is_proxy = False assert not is_proxy(NP()) @@ -280,7 +280,7 @@ def test_reshape_dataobj(): np.reshape(arr, (2, 3, 4))) assert arr.shape == shape - class ArrGiver(object): + class ArrGiver: def __array__(self): return arr diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index 45e149b93b..9f3bfdd93c 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -71,7 +71,7 @@ ) ] -class TestAFNIHeader(object): +class TestAFNIHeader: module = brikhead test_files = EXAMPLE_IMAGES @@ -86,7 +86,7 @@ def test_makehead(self): self.module.AFNIHeader.from_header(tp['fname']) -class TestAFNIImage(object): +class TestAFNIImage: module = brikhead test_files = EXAMPLE_IMAGES @@ -127,7 +127,7 @@ def test_array_proxy_slicing(self): assert_array_equal(arr[sliceobj], prox[sliceobj]) -class TestBadFiles(object): +class TestBadFiles: module = brikhead test_files = EXAMPLE_BAD_IMAGES @@ -137,7 +137,7 @@ def test_brikheadfile(self): self.module.load(tp['head']) -class TestBadVars(object): +class TestBadVars: module = brikhead vars = ['type = badtype-attribute\nname = BRICK_TYPES\ncount = 1\n1\n', 'type = integer-attribute\ncount = 1\n1\n'] diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index fe645154be..c09fda4988 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -32,7 +32,7 @@ def test_module_proxy(): def test_futurewarning_mixin(): # Test mixin for FutureWarning - class C(object): + class C: def 
__init__(self, val): self.val = val diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 12c5b36b92..2e7a0b9ba9 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -62,7 +62,7 @@ def func_doc_long(i, j): "A docstring\n\n Some text" -class TestDeprecatorFunc(object): +class TestDeprecatorFunc: """ Test deprecator function specified in ``dep_func`` """ dep_func = Deprecator(cmp_func) @@ -136,7 +136,7 @@ def test_dep_func(self): func() -class TestDeprecatorMaker(object): +class TestDeprecatorMaker: """ Test deprecator class creation with custom warnings and errors """ dep_maker = partial(Deprecator, cmp_func) diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index 1accd01a14..dcd812c52d 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -22,7 +22,7 @@ ECAT_TEST_PATH = pjoin(get_nibabel_data(), 'nipy-ecattest') -class TestNegatives(object): +class TestNegatives: opener = staticmethod(load) example_params = dict( fname=os.path.join(ECAT_TEST_PATH, 'ECAT7_testcaste_neg_values.v'), diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index e4287988d7..21c7b14086 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -481,7 +481,7 @@ def validate_mmap_parameter(self, imaker, params): del rt_img # to allow windows to delete the directory -class HeaderShapeMixin(object): +class HeaderShapeMixin: """ Tests that header shape can be set and got Add this one of your header supports ``get_data_shape`` and @@ -499,7 +499,7 @@ def validate_header_shape(self, imaker, params): assert img.header.get_data_shape() == new_shape -class AffineMixin(object): +class AffineMixin: """ Adds test of affine property, method Add this one if your image has an ``affine`` property. 
If so, it should diff --git a/nibabel/tests/test_keywordonly.py b/nibabel/tests/test_keywordonly.py index 26e21ce02d..0ef631dbf4 100644 --- a/nibabel/tests/test_keywordonly.py +++ b/nibabel/tests/test_keywordonly.py @@ -33,7 +33,7 @@ def kw_func(an_arg, a_kwarg='thing'): kw_func(1, akeyarg=3) assert kw_func.__doc__ == 'Another docstring' - class C(object): + class C: @kw_only_meth(1) def kw_meth(self, an_arg, a_kwarg='thing'): diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 829523c4c9..4fecf5782e 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -103,7 +103,7 @@ ] -class _TestMincFile(object): +class _TestMincFile: module = minc1 file_class = Minc1File fname = EG_FNAME diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index ebd53d7ced..fda6c1f8ec 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -34,7 +34,7 @@ def _make_affine(coses, zooms, starts): return affine -class TestEPIFrame(object): +class TestEPIFrame: opener = staticmethod(top_load) x_cos = [1, 0, 0] y_cos = [0., 1, 0] diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 8ed897b036..63cf13c103 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -1387,7 +1387,7 @@ def test_nifti_dicom_extension(): Nifti1DicomExtension(2, 0) -class TestNifti1General(object): +class TestNifti1General: """ Test class to test nifti1 in general Tests here which mix the pair and the single type, and that should only be diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index dbc187f039..106e3ec787 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -26,7 +26,7 @@ image_file = os.path.join(data_path, 'example_nifti2.nii.gz') -class _Nifti2Mixin(object): +class _Nifti2Mixin: example_file = header_file sizeof_hdr = Nifti2Header.sizeof_hdr quat_dtype = np.float64 diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 541af368c5..0a687353e3 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -34,7 +34,7 @@ pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") -class Lunk(object): +class Lunk: # bare file-like for testing closed = False diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 9a8f2b1dfc..f40bf3b80a 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -728,7 +728,7 @@ def test_image_creation(): assert_array_equal(img.dataobj, arr_prox_fp) -class FakeHeader(object): +class FakeHeader: """ Minimal API of header for PARRECArrayProxy """ diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index 713e192707..127a7b0704 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -85,7 +85,7 @@ def test_recoder_6(): def test_custom_dicter(): # Allow custom dict-like object in constructor - class MyDict(object): + class MyDict: def __init__(self): self._keys = [] diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index dd707aa242..e7cad0de2c 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -69,7 +69,7 @@ def test_from_header(): assert hdr == copy assert hdr is not copy - class C(object): + class C: def get_data_dtype(self): return np.dtype('u2') @@ -188,7 +188,7 @@ class CHeader(SpatialHeader): assert (data == data2).all() -class DataLike(object): +class DataLike: # Minimal class implementing 'data' API 
shape = (3,) @@ -530,7 +530,7 @@ def test_slicer(self): assert (sliced_data == img.get_fdata()[sliceobj]).all() -class MmapImageMixin(object): +class MmapImageMixin: """ Mixin for testing images that may return memory maps """ #: whether to test mode of returned memory map check_mmap_mode = True diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index 492faf1d51..d2ba898fa6 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -47,7 +47,7 @@ NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES -class HeaderScalingMixin(object): +class HeaderScalingMixin: """ Mixin to add scaling tests to header tests Needs to be a mixin so nifti tests can use this method without inheriting @@ -155,7 +155,7 @@ def test_origin_checks(self): assert dxer(hdr.binaryblock) == 'very large origin values relative to dims' -class ImageScalingMixin(object): +class ImageScalingMixin: # Mixin to add scaling checks to image test class # Nifti tests inherits from Analyze tests not Spm Analyze tests. We need # these tests for Nifti scaling, hence the mixin. diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index c7452027bb..10b5ee78f5 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -13,7 +13,7 @@ from tempfile import template, mkdtemp -class TemporaryDirectory(object): +class TemporaryDirectory: """Create and return a temporary directory. This has the same behavior as mkdtemp but can be used as a context manager. @@ -81,7 +81,7 @@ def __exit__(self, exc, value, tb): return super(InTemporaryDirectory, self).__exit__(exc, value, tb) -class InGivenDirectory(object): +class InGivenDirectory: """ Change directory to given directory for duration of ``with`` block Useful when you want to use `InTemporaryDirectory` for the final test, but diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index abb54268d4..db659df337 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -29,7 +29,7 @@ def is_tripwire(obj): return False -class TripWire(object): +class TripWire: """ Class raising error if used Standard use is to proxy modules that we could not import diff --git a/nibabel/viewers.py b/nibabel/viewers.py index e435ac4ac9..65e813ef0f 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -12,7 +12,7 @@ from .orientations import aff2axcodes, axcodes2ornt -class OrthoSlicer3D(object): +class OrthoSlicer3D: """ Orthogonal-plane slice viewer. OrthoSlicer3d expects 3- or 4-dimensional array data. It treats diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index abbcfc1afd..dc82287dbb 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -48,7 +48,7 @@ COMPRESSED_FILE_LIKES = (*COMPRESSED_FILE_LIKES, pyzstd.ZstdFile) -class Recoder(object): +class Recoder: """ class to return canonical code(s) from code or aliases The concept is a lot easier to read in the implementation and @@ -224,7 +224,7 @@ def value_set(self, name=None): endian_codes = Recoder(endian_codes) -class DtypeMapper(object): +class DtypeMapper: """ Specialized mapper for numpy dtypes We pass this mapper into the Recoder class to deal with numpy dtype @@ -307,7 +307,7 @@ def pretty_mapping(mapping, getterfunc=None): >>> d = {'a key': 'a value'} >>> print(pretty_mapping(d)) a key : a value - >>> class C(object): # to control ordering, show get_ method + >>> class C: # to control ordering, show get_ method ... def __iter__(self): ... return iter(('short_field','longer_field')) ... 
def __getitem__(self, key): diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index af64a762c3..c0c13710d6 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -121,7 +121,7 @@ class WrapStructError(Exception): pass -class WrapStruct(object): +class WrapStruct: # placeholder datatype template_dtype = np.dtype([('integer', 'i2')]) diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 1eb7e0ca02..d907f95e10 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -17,7 +17,7 @@ from .filebasedimages import FileBasedHeader -class XmlSerializable(object): +class XmlSerializable: """ Basic interface for serializing an object to xml""" def _to_xml_element(self): @@ -35,7 +35,7 @@ class XmlBasedHeader(FileBasedHeader, XmlSerializable): """ Basic wrapper around FileBasedHeader and XmlSerializable.""" -class XmlParser(object): +class XmlParser: """ Base class for defining how to parse xml-based image snippets. Image-specific parsers should define: diff --git a/nisext/sexts.py b/nisext/sexts.py index 37a8adcc7c..602572280d 100644 --- a/nisext/sexts.py +++ b/nisext/sexts.py @@ -260,7 +260,7 @@ def run(self): fobj.write(bat_contents) -class Bunch(object): +class Bunch: def __init__(self, vars): for key, name in vars.items(): if key.startswith('__'): From 65b3544126231e3177a0b3b9896b525f539747e9 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 20 Nov 2022 21:24:26 +0100 Subject: [PATCH 059/702] DOC: fix typos found by codespell --- doc/source/devel/data_pkg_discuss.rst | 8 ++++---- doc/source/devel/spm_use.rst | 2 +- doc/source/dicom/dcm2nii_algorithms.rst | 2 +- doc/source/dicom/spm_dicom.rst | 4 ++-- doc/source/installation.rst | 2 +- doc/source/old/examples.txt | 2 +- nibabel/data.py | 2 +- nibabel/pkg_info.py | 2 +- nibabel/tests/test_analyze.py | 2 +- nibabel/tests/test_api_validators.py | 2 +- nibabel/tests/test_fileslice.py | 2 +- nibabel/tests/test_processing.py | 2 +- nibabel/tests/test_wrapstruct.py | 2 +- 13 files changed, 17 insertions(+), 17 deletions(-) diff --git a/doc/source/devel/data_pkg_discuss.rst b/doc/source/devel/data_pkg_discuss.rst index dbd8cca88a..f983351925 100644 --- a/doc/source/devel/data_pkg_discuss.rst +++ b/doc/source/devel/data_pkg_discuss.rst @@ -367,14 +367,14 @@ Discovery .. [#tag-sources] - Revsion ids could for example be hashes of the package instantiation + Revision ids could for example be hashes of the package instantiation (package contents), so they could be globally unique to the contents, wherever the contents was when the identifier was made. However, *tags* - are just names that someone has attached to a particular revsion id. If + are just names that someone has attached to a particular revision id. If there is more than one person providing versions of a particular package, - there may not be agreement on the revsion that a particular tag is attached + there may not be agreement on the revision that a particular tag is attached to. For example, I might think that ``release-0.3`` of ``some-package`` - refers to package state identified by revsion id ``af5bd6``, but you might + refers to package state identified by revision id ``af5bd6``, but you might think that ``release-0.3`` of ``some-package`` refers to some other package state. In this case you and are are both a *tag sources* for the package. 
The state that particular tag refers to can depend then on the source from diff --git a/doc/source/devel/spm_use.rst b/doc/source/devel/spm_use.rst index a4e6d8dbfd..56c7051696 100644 --- a/doc/source/devel/spm_use.rst +++ b/doc/source/devel/spm_use.rst @@ -291,7 +291,7 @@ from volumes, as long as the transform is an affine. Miscellaneous functions operating on vol structs: -* ``spm_conv_vol`` - convolves volume with seperable functions in x, y, z +* ``spm_conv_vol`` - convolves volume with separable functions in x, y, z * ``spm_render_vol`` - does a projection of a volume onto a surface * ``spm_vol_check`` - takes array of vol structs and checks for sameness of image dimensions and ``mat`` (affines) across the list. diff --git a/doc/source/dicom/dcm2nii_algorithms.rst b/doc/source/dicom/dcm2nii_algorithms.rst index ac4ab3e4da..809ac51c51 100644 --- a/doc/source/dicom/dcm2nii_algorithms.rst +++ b/doc/source/dicom/dcm2nii_algorithms.rst @@ -18,7 +18,7 @@ Compiling dcm2nii ================= Follow the download / install instructions at the -http://www.lazarus.freepascal.org/ site. I was on a Mac, and folowed the +http://www.lazarus.freepascal.org/ site. I was on a Mac, and followed the instructions here: http://wiki.lazarus.freepascal.org/Installing_Lazarus_on_MacOS_X . Default build with version 0.9.28.2 gave an error linking against Carbon, so I needed to diff --git a/doc/source/dicom/spm_dicom.rst b/doc/source/dicom/spm_dicom.rst index 8c89e0ecda..67b6bcf0ca 100644 --- a/doc/source/dicom/spm_dicom.rst +++ b/doc/source/dicom/spm_dicom.rst @@ -80,8 +80,8 @@ There's a check for not-even tag length. If not even: #. 13 appears to mean 10 and is reset to be 10 #. Any other odd number is not valid and gives a tag length of 0 -``SQ`` VR type (Sequnce of items type) --------------------------------------- +``SQ`` VR type (Sequence of items type) +--------------------------------------- tag length of 13 set to tag length 10. diff --git a/doc/source/installation.rst b/doc/source/installation.rst index ffc5f7f47c..5e6009f7ae 100644 --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -43,7 +43,7 @@ Windows), and type:: This will download and install NiBabel. -If you really like doing stuff manually, you can install NiBabel by downoading +If you really like doing stuff manually, you can install NiBabel by downloading the source from `NiBabel pypi`_ . Go to the pypi page and select the source distribution you want. Download the distribution, unpack it, and then, from the unpacked directory, run:: diff --git a/doc/source/old/examples.txt b/doc/source/old/examples.txt index 19a44d9cb0..dfbc2b4d9e 100644 --- a/doc/source/old/examples.txt +++ b/doc/source/old/examples.txt @@ -80,7 +80,7 @@ the desired values: >>> print nim.header['dim'] [ 4 32 32 16 100 1 1 1] -First value shows the number of dimensions in the datset: 4 (good, that's what +First value shows the number of dimensions in the dataset: 4 (good, that's what we wanted). The following numbers are dataset size on the x, y, z, t, u, v, w axis (NIfTI files can handle up to 7 dimensions). diff --git a/nibabel/data.py b/nibabel/data.py index 438cd2aa58..52be86fd43 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -76,7 +76,7 @@ def list_files(self, relative=True): Parameters ---------- relative: bool, optional - If True, path returned are relative to the base paht of + If True, path returned are relative to the base path of the data source. 
Returns diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 43b39f4e89..e28cc6e28d 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -32,7 +32,7 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): version_str : str Version string to compare to current package version pkg_version_str : str, optional - Version of our package. Optional, set fom ``__version__`` by default. + Version of our package. Optional, set from ``__version__`` by default. Returns ------- diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index bbcdfbeefc..28aebd35a1 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -656,7 +656,7 @@ def as_analyze_map(self): exp_hdr['cal_min'] = -100 exp_hdr['cal_max'] = 100 assert klass.from_header(H5()) == exp_hdr - # set_* methods override fields fron header + # set_* methods override fields from header class H6(H5): diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 76043348c9..782a7915fc 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -34,7 +34,7 @@ class ValidateAPI(metaclass=validator2test): Your job is twofold: - * define an ``obj_params`` iteratable, where the iterator returns (``obj``, + * define an ``obj_params`` iterable, where the iterator returns (``obj``, ``params``) pairs. ``obj`` is something that you want to validate against an API. ``params`` is a mapping giving parameters for this object to test against. diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index e33c1f6814..35c61e149b 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -298,7 +298,7 @@ def test_optimize_slicer(): # be 'full' assert optimize_slicer(slice(9), 10, False, False, 4, _always) == (slice(0, 9, 1), slice(None)) assert optimize_slicer(slice(9), 10, True, False, 4, _always) == (slice(None), slice(0, 9, 1)) - # Unless this is the slowest dimenion, and all_true is True, in which case + # Unless this is the slowest dimension, and all_true is True, in which case # we don't update to full assert optimize_slicer(slice(9), 10, True, True, 4, _always) == (slice(0, 9, 1), slice(None)) # Nor if the heuristic won't update diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index 401966faa9..3c2a70a8c4 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -83,7 +83,7 @@ def test_adapt_affine(): [ 3, 4, 12], [ 6, 7, 13], [ 0, 0, 1]]) - # For 4x4 affine, 1D image, 2 dropped columnn + # For 4x4 affine, 1D image, 2 dropped columns assert_array_equal(adapt_affine(aff_3d, 1), [[ 0, 11], [ 3, 12], diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 011e16d47d..a360804f5a 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -213,7 +213,7 @@ def test_endian_guess(self): assert eh_swapped.endianness == swapped_code def test_binblock_is_file(self): - # Checks that the binary string respresentation is the whole of the + # Checks that the binary string representation is the whole of the # header file. This is true for Analyze types, but not true Nifti # single file headers, for example, because they will have extension # strings following. 
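The property the comment describes is easy to check directly; a rough sketch, assuming scratch write access to the working directory (illustrative only, not part of the test suite):

import numpy as np
import nibabel as nib

# For an Analyze-type pair the header file is exactly the binary block;
# a single-file NIfTI instead appends extension bytes and data after
# its 348 header bytes.
img = nib.AnalyzeImage(np.zeros((2, 3, 4), dtype=np.int16), np.eye(4))
nib.save(img, 'test.hdr')
with open('test.hdr', 'rb') as fobj:
    assert fobj.read() == img.header.binaryblock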
More generally, there may be other perhaps From f3fab151c85bd0ab8741afe8fdb8795df63912d3 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 19:51:15 +0100 Subject: [PATCH 060/702] MNT: Apply refurb suggestion [FURB125]: Return is redundant here --- nibabel/cmdline/dicomfs.py | 3 --- nibabel/dft.py | 9 --------- nibabel/wrapstruct.py | 1 - nisext/sexts.py | 5 ++--- nisext/testers.py | 1 - 5 files changed, 2 insertions(+), 17 deletions(-) diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index 9b1b735cca..af7bd96d3e 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -48,7 +48,6 @@ def __init__(self, fno): self.fno = fno self.keep_cache = False self.direct_io = False - return def __str__(self): return 'FileHandle(%d)' % self.fno @@ -64,7 +63,6 @@ def __init__(self, *args, **kwargs): self.dicom_path = kwargs.pop('dicom_path', None) fuse.Fuse.__init__(self, *args, **kwargs) self.fhs = {} - return def get_paths(self): paths = {} @@ -190,7 +188,6 @@ def release(self, path, flags, fh): logger.debug(path) logger.debug(fh) del self.fhs[fh.fno] - return def get_opt_parser(): diff --git a/nibabel/dft.py b/nibabel/dft.py index 3de1b31254..f47d70ccb6 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -51,7 +51,6 @@ def __init__(self, series, i, si): self.series = series self.i = i self.si = si - return def __str__(self): fmt = 'expecting instance number %d, got %d' @@ -70,7 +69,6 @@ def __init__(self, d): self.patient_birth_date = d['patient_birth_date'] self.patient_sex = d['patient_sex'] self.series = None - return def __getattribute__(self, name): val = object.__getattribute__(self, name) @@ -103,7 +101,6 @@ def __init__(self, d): self.bits_allocated = d['bits_allocated'] self.bits_stored = d['bits_stored'] self.storage_instances = None - return def __getattribute__(self, name): val = object.__getattribute__(self, name) @@ -226,7 +223,6 @@ def __init__(self, d): self.instance_number = d['instance_number'] self.series = d['series'] self.files = None - return def __getattribute__(self, name): val = object.__getattribute__(self, name) @@ -256,7 +252,6 @@ def __exit__(self, type, value, traceback): if type is None: self.c.close() DB.rollback() - return class _db_change: @@ -272,7 +267,6 @@ def __exit__(self, type, value, traceback): DB.commit() else: DB.rollback() - return def _get_subdirs(base_dir, files_dict=None, followlinks=False): @@ -316,7 +310,6 @@ def update_cache(base_dir, followlinks=False): else: query = "INSERT INTO directory (path, mtime) VALUES (?, ?)" c.execute(query, (dir, mtimes[dir])) - return def get_studies(base_dir=None, followlinks=False): @@ -382,7 +375,6 @@ def _update_dir(c, dir, files, studies, series, storage_instances): SET mtime = ?, storage_instance = ? WHERE directory = ? 
AND name = ?""" c.execute(query, (mtime, si_uid, dir, fname)) - return def _update_file(c, path, fname, studies, series, storage_instances): @@ -457,7 +449,6 @@ def clear_cache(): c.execute("DELETE FROM storage_instance") c.execute("DELETE FROM series") c.execute("DELETE FROM study") - return CREATE_QUERIES = ( diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index c0c13710d6..b933892565 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -172,7 +172,6 @@ def __init__(self, self._structarr = wstr.copy() if check: self.check_fix() - return @classmethod def from_fileobj(klass, fileobj, endianness=None, check=True): diff --git a/nisext/sexts.py b/nisext/sexts.py index 602572280d..6ececdac78 100644 --- a/nisext/sexts.py +++ b/nisext/sexts.py @@ -183,9 +183,8 @@ def version_getter(pkg_name): _add_append_key(setuptools_args['extras_require'], optional, dependency) - return - _add_append_key(setuptools_args, 'install_requires', dependency) - return + else: + _add_append_key(setuptools_args, 'install_requires', dependency) def _package_status(pkg_name, version, version_getter, checker): diff --git a/nisext/testers.py b/nisext/testers.py index df26c7af39..05b2d92a3e 100644 --- a/nisext/testers.py +++ b/nisext/testers.py @@ -277,7 +277,6 @@ def contexts_print_info(mod_name, repo_path, install_path): print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0]) # test from development tree print(run_mod_cmd(mod_name, repo_path, cmd_str)[0]) - return def info_from_here(mod_name): From 0690ea3fb3200fd7f8b2bb6d1395d05f06256c53 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 20:01:02 +0100 Subject: [PATCH 061/702] MNT: Apply refurb suggestion [FURB115]: Replace `len(x) > 0` with `x` --- nibabel/streamlines/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 8dfe96f927..12e777d754 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -129,7 +129,7 @@ def save(tractogram, filename, **kwargs): " 'TractogramFile' object.") warnings.warn(msg, ExtensionWarning) - if len(kwargs) > 0: + if kwargs: msg = ("A 'TractogramFile' object was provided, no need for" " keyword arguments.") raise ValueError(msg) From 4c9a12916b29b161c6d784e97ef12d38bbc4fd57 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 20:20:07 +0100 Subject: [PATCH 062/702] MNT: Apply refurb suggestion [FURB113]: Use `x.extend(...)` instead of repeatedly calling `x.append()` --- nibabel/cifti2/tests/test_cifti2.py | 3 +-- nibabel/cifti2/tests/test_new_cifti2.py | 33 +++++++++---------------- nibabel/cmdline/dicomfs.py | 3 +-- nibabel/streamlines/tck.py | 13 +++++----- nibabel/tests/data/gen_standard.py | 16 ++++++------ 5 files changed, 27 insertions(+), 41 deletions(-) diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index fc64c34554..b04d1db585 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -326,8 +326,7 @@ def test_matrixindicesmap(): parcel = ci.Cifti2Parcel() assert mim.volume is None - mim.append(volume) - mim.append(parcel) + mim.extend((volume, parcel)) assert mim.volume == volume diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index 65ef95c316..a49ba79d52 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py 
+++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -232,8 +232,7 @@ def test_dtseries(): series_map = create_series_map((0, )) geometry_map = create_geometry_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(series_map) - matrix.append(geometry_map) + matrix.extend((series_map, geometry_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(13, 10) img = ci.Cifti2Image(data, hdr) @@ -254,8 +253,7 @@ def test_dscalar(): scalar_map = create_scalar_map((0, )) geometry_map = create_geometry_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(scalar_map) - matrix.append(geometry_map) + matrix.extend((scalar_map, geometry_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(2, 10) img = ci.Cifti2Image(data, hdr) @@ -276,8 +274,7 @@ def test_dlabel(): label_map = create_label_map((0, )) geometry_map = create_geometry_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(label_map) - matrix.append(geometry_map) + matrix.extend((label_map, geometry_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(2, 10) img = ci.Cifti2Image(data, hdr) @@ -318,8 +315,7 @@ def test_ptseries(): series_map = create_series_map((0, )) parcel_map = create_parcel_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(series_map) - matrix.append(parcel_map) + matrix.extend((series_map, parcel_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(13, 4) img = ci.Cifti2Image(data, hdr) @@ -340,8 +336,7 @@ def test_pscalar(): scalar_map = create_scalar_map((0, )) parcel_map = create_parcel_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(scalar_map) - matrix.append(parcel_map) + matrix.extend((scalar_map, parcel_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(2, 4) img = ci.Cifti2Image(data, hdr) @@ -362,8 +357,7 @@ def test_pdconn(): geometry_map = create_geometry_map((0, )) parcel_map = create_parcel_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(geometry_map) - matrix.append(parcel_map) + matrix.extend((geometry_map, parcel_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(10, 4) img = ci.Cifti2Image(data, hdr) @@ -384,8 +378,7 @@ def test_dpconn(): parcel_map = create_parcel_map((0, )) geometry_map = create_geometry_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(parcel_map) - matrix.append(geometry_map) + matrix.extend((parcel_map, geometry_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 10) img = ci.Cifti2Image(data, hdr) @@ -406,8 +399,7 @@ def test_plabel(): label_map = create_label_map((0, )) parcel_map = create_parcel_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(label_map) - matrix.append(parcel_map) + matrix.extend((label_map, parcel_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(2, 4) img = ci.Cifti2Image(data, hdr) @@ -448,8 +440,7 @@ def test_pconnseries(): series_map = create_series_map((2, )) matrix = ci.Cifti2Matrix() - matrix.append(parcel_map) - matrix.append(series_map) + matrix.extend((parcel_map, series_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 13) img = ci.Cifti2Image(data, hdr) @@ -473,8 +464,7 @@ def test_pconnscalar(): scalar_map = create_scalar_map((2, )) matrix = ci.Cifti2Matrix() - matrix.append(parcel_map) - matrix.append(scalar_map) + matrix.extend((parcel_map, scalar_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 2) img = ci.Cifti2Image(data, hdr) @@ -499,8 +489,7 @@ def test_wrong_shape(): brain_model_map = create_geometry_map((1, )) matrix = ci.Cifti2Matrix() - matrix.append(scalar_map) - matrix.append(brain_model_map) + matrix.extend((scalar_map, 
brain_model_map)) hdr = ci.Cifti2Header(matrix) # correct shape is (2, 10) diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index af7bd96d3e..33532cf8e7 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -117,8 +117,7 @@ def readdir(self, path, fh): return -errno.ENOENT logger.debug(f'matched {matched_path}') fnames = [k.encode('ascii', 'replace') for k in matched_path.keys()] - fnames.append('.') - fnames.append('..') + fnames.extend(('.', '..')) return [fuse.Direntry(f) for f in fnames] def getattr(self, path): diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index c9bba94a6e..37bdbe3ffb 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -249,12 +249,13 @@ def _write_header(fileobj, header): Field.VOXEL_TO_RASMM, # Streamlines are always in RAS+ mm. "count", "datatype", "file"] # Fields being replaced. - lines = [] - lines.append(f"count: {header[Field.NB_STREAMLINES]:010}") - lines.append("datatype: Float32LE") # Always Float32LE. - lines.extend([f"{k}: {v}" - for k, v in header.items() - if k not in exclude and not k.startswith("_")]) + lines = [ + f"count: {header[Field.NB_STREAMLINES]:010}", + "datatype: Float32LE", # Always Float32LE. + ] + lines.extend(f"{k}: {v}" + for k, v in header.items() + if k not in exclude and not k.startswith("_")) out = "\n".join(lines) # Check the header is well formatted. diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index f966b5599d..0e532bd123 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -35,19 +35,17 @@ def _gen_straight_streamline(start, end, steps=3): return np.array(coords).T # Generate a 3D 'X' template fitting inside the voxel centered at (0,0,0). - X = [] - X.append(_gen_straight_streamline((-0.5, -0.5, -0.5), (0.5, 0.5, 0.5))) - X.append(_gen_straight_streamline((-0.5, 0.5, -0.5), (0.5, -0.5, 0.5))) - X.append(_gen_straight_streamline((-0.5, 0.5, 0.5), (0.5, -0.5, -0.5))) - X.append(_gen_straight_streamline((-0.5, -0.5, 0.5), (0.5, 0.5, -0.5))) + X = [ + _gen_straight_streamline((-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)), + _gen_straight_streamline((-0.5, 0.5, -0.5), (0.5, -0.5, 0.5)), + _gen_straight_streamline((-0.5, 0.5, 0.5), (0.5, -0.5, -0.5)), + _gen_straight_streamline((-0.5, -0.5, 0.5), (0.5, 0.5, -0.5)), + ] # Get the coordinates of voxels 'on' in the mask. coords = np.array(zip(*np.where(mask))) - streamlines = [] - for c in coords: - for line in X: - streamlines.append((line + c) * voxel_size) + streamlines = [(line + c) * voxel_size for line in X for c in coords] return streamlines From 59825e446bac53f6b290870d21991d0e8370cf59 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 20:27:36 +0100 Subject: [PATCH 063/702] MNT: Apply refurb suggestion [FURB110]: Use `x or y` instead of `x if x else y` --- nibabel/deprecator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 3ff9516aec..81e93d868e 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -161,8 +161,8 @@ def __call__(self, message, since='', until='', deprecator : func Function returning a decorator. 
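The refurb rules applied in this series reduce to small, behavior-preserving rewrites; an illustrative before/after sketch with throwaway names rather than code from the tree:

# FURB125: a bare `return` at the end of a function body is redundant
# and can simply be dropped.

# FURB113: one extend() call replaces repeated append() calls.
fnames = ['a', 'b']
fnames.append('.')            # before
fnames.append('..')
fnames = ['a', 'b']
fnames.extend(('.', '..'))    # after

# FURB110: `x or y` already falls back whenever x is falsy, so it is
# equivalent to `x if x else y`.
warn_class = None
chosen = warn_class if warn_class else DeprecationWarning   # before
chosen = warn_class or DeprecationWarning                   # after

The `or` form is appropriate here because the default should apply for any falsy value, None included.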
""" - warn_class = warn_class if warn_class else self.warn_class - error_class = error_class if error_class else self.error_class + warn_class = warn_class or self.warn_class + error_class = error_class or self.error_class messages = [message] if (since, until) != ('', ''): messages.append('') From 39f124133a3c7e1c4ba71bebcbd5189aadd7ec19 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 20:29:19 +0100 Subject: [PATCH 064/702] MNT: Apply refurb suggestion [FURB123]: Replace `int(x)` with `x` --- nibabel/cmdline/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index c03f97277d..e6aa0a2fb5 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -32,7 +32,7 @@ def verbose(thing, msg): """Print `s` if `thing` is less than the `verbose_level` """ # TODO: consider using nibabel's logger - if thing <= int(verbose_level): + if thing <= verbose_level: print(' ' * thing + msg) From 7e8f78e9021757d825fe7a286fb470bfbdf5f928 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 20:36:00 +0100 Subject: [PATCH 065/702] MNT: Apply refurb suggestion [FURB108]: Use `x in (y, z)` instead of `x == y or x == z` --- nibabel/tests/test_volumeutils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 4994f94e48..9c06207bd4 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -947,9 +947,7 @@ def test_seek_tell(): # zeros in their wake. BZ2Files can't seek when writing, # unless we enable the write0 flag to seek_tell # ZstdFiles also does not support seek forward on write - if (not write0 and - (in_file == 'test.bz2' or - in_file == 'test.zst')): # Can't seek write in bz2, zst + if (not write0 and in_file in ('test.bz2', 'test.zst')): # write the zeros by hand for the read test below fobj.write(b'\x00' * diff) else: From 4769e0c8221b790f8ddd02e4b543c4ac67068fda Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 20:37:43 +0100 Subject: [PATCH 066/702] MNT: Apply refurb suggestion [FURB118]: Replace lambda with `operator.eq` --- nibabel/nicom/dicomwrappers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 85e1655bc7..d1ca3ee173 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -737,7 +737,7 @@ def series_signature(self): ice = csar.get_ice_dims(self.csa_header) if ice is not None: ice = ice[:6] + ice[8:9] - signature['ICE_Dims'] = (ice, lambda x, y: x == y) + signature['ICE_Dims'] = (ice, operator.eq) return signature @one_time From fe78b60495cf39ceb6898e5df1969995dfdbd001 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 21:36:02 +0100 Subject: [PATCH 067/702] Update nibabel/tests/test_volumeutils.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_volumeutils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 9c06207bd4..287bd60105 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -947,7 +947,7 @@ def 
test_seek_tell(): # zeros in their wake. BZ2Files can't seek when writing, # unless we enable the write0 flag to seek_tell # ZstdFiles also does not support seek forward on write - if (not write0 and in_file in ('test.bz2', 'test.zst')): + if not write0 and in_file in ('test.bz2', 'test.zst'): # write the zeros by hand for the read test below fobj.write(b'\x00' * diff) else: From ad30f0ec472ccde84f788f347da297edc6870b91 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 21:42:46 +0100 Subject: [PATCH 068/702] Update nibabel/tests/data/gen_standard.py Co-authored-by: Chris Markiewicz --- nibabel/tests/data/gen_standard.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index 0e532bd123..477e687224 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -45,7 +45,7 @@ def _gen_straight_streamline(start, end, steps=3): # Get the coordinates of voxels 'on' in the mask. coords = np.array(zip(*np.where(mask))) - streamlines = [(line + c) * voxel_size for line in X for c in coords] + streamlines = [(line + c) * voxel_size for c in coords for line in X] return streamlines From 5529d857fef033f87de4a62283908dee7c6e1593 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 12 Dec 2022 21:29:34 +0100 Subject: [PATCH 069/702] MNT: Apply pyupgrade suggestions --- doc/tools/apigen.py | 4 ++-- nibabel/cifti2/cifti2.py | 4 ++-- nibabel/cifti2/tests/test_axes.py | 6 ++---- nibabel/cifti2/tests/test_cifti2io_header.py | 2 +- nibabel/cmdline/ls.py | 2 +- nibabel/cmdline/parrec2nii.py | 2 +- nibabel/ecat.py | 2 +- nibabel/filebasedimages.py | 4 ++-- nibabel/freesurfer/io.py | 2 +- nibabel/gifti/gifti.py | 6 +++--- nibabel/nicom/dicomreaders.py | 2 +- nibabel/nicom/tests/test_dicomreaders.py | 2 +- nibabel/nifti1.py | 2 +- nibabel/nifti2.py | 2 +- nibabel/spm99analyze.py | 2 +- nibabel/streamlines/__init__.py | 2 +- nibabel/tests/test_arrayproxy.py | 2 +- nibabel/tests/test_ecat.py | 2 +- nibabel/tests/test_loadsave.py | 2 +- nibabel/tests/test_volumeutils.py | 16 +++++++--------- nibabel/volumeutils.py | 10 +++++----- 21 files changed, 37 insertions(+), 41 deletions(-) diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py index cabbd319c5..68d8f68749 100644 --- a/doc/tools/apigen.py +++ b/doc/tools/apigen.py @@ -42,7 +42,7 @@ def __init__(self, module_skip_patterns=None, other_defines=True ): - """ Initialize package for parsing + r""" Initialize package for parsing Parameters ---------- @@ -358,7 +358,7 @@ def _survives_exclude(self, matchstr, match_type): return True def discover_modules(self): - """ Return module sequence discovered from ``self.package_name`` + r""" Return module sequence discovered from ``self.package_name`` Parameters diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index b75fd01db9..31d631bb5f 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -648,8 +648,8 @@ def __init__(self, name=None, voxel_indices_ijk=None, vertices=None): self.vertices = vertices if vertices is not None else [] for val in self.vertices: if not isinstance(val, Cifti2Vertices): - raise ValueError(('Cifti2Parcel vertices must be instances of ' - 'Cifti2Vertices')) + raise ValueError('Cifti2Parcel vertices must be instances of ' + 'Cifti2Vertices') @property def voxel_indices_ijk(self): diff --git 
a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index 3f6cb3a1a4..21cd83e80e 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -93,10 +93,8 @@ def get_axes(): yield get_parcels() yield get_scalar() yield get_label() - for elem in get_brain_models(): - yield elem - for elem in get_series(): - yield elem + yield from get_brain_models() + yield from get_series() def test_brain_models(): diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index c4e0682af4..541ceaa30c 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -50,7 +50,7 @@ def test_read_nifti2(): # Error trying to read a CIFTI-2 image from a NIfTI2-only image. filemap = ci.Cifti2Image.make_file_map() for k in filemap: - filemap[k].fileobj = io.open(NIFTI2_DATA) + filemap[k].fileobj = open(NIFTI2_DATA) with pytest.raises(ValueError): ci.Cifti2Image.from_file_map(filemap) diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 91e55860f4..2995ff58c5 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -145,7 +145,7 @@ def proc_file(f, opts): freq = np.bincount(inv) counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq)) row += ["@l" + counts] - except IOError as e: + except OSError as e: verbose(2, f"Failed to obtain stats/counts -- {e}") row += [_err()] return row diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index bf8146e8f9..0f868bd06b 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -158,7 +158,7 @@ def proc_file(infile, opts): else: outfilename = basefilename + '.nii' if os.path.isfile(outfilename) and not opts.overwrite: - raise IOError(f'Output file "{outfilename}" exists, use --overwrite to overwrite it') + raise OSError(f'Output file "{outfilename}" exists, use --overwrite to overwrite it') # load the PAR header and data scaling = 'dv' if opts.scaling == 'off' else opts.scaling diff --git a/nibabel/ecat.py b/nibabel/ecat.py index f206e482de..1d7bddaa16 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -471,7 +471,7 @@ def get_series_framenumbers(mlist): try: frame_dict[frame_stored] = trueframenumbers[true_order] + 1 except IndexError: - raise IOError('Error in header or mlist order unknown') + raise OSError('Error in header or mlist order unknown') return frame_dict diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 180fe7c6a7..5162263a28 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -430,7 +430,7 @@ def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): try: with ImageOpener(meta_fname, 'rb') as fobj: binaryblock = fobj.read(sniff_nbytes) - except (IOError, EOFError): + except (OSError, EOFError): return None return (binaryblock, meta_fname) @@ -564,7 +564,7 @@ def from_stream(klass, io_obj: io.IOBase): return klass.from_file_map(klass._filemap_from_iobase(io_obj)) def to_stream(self, io_obj: io.IOBase, **kwargs): - """Save image to writable IO stream + r"""Save image to writable IO stream Parameters ---------- diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index e2f48d88eb..77f7fe892a 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -68,7 +68,7 @@ def _read_volume_info(fobj): 'zras', 'cras']: pair = fobj.readline().decode('utf-8').split('=') if pair[0].strip() != key or len(pair) != 2: - raise IOError('Error parsing volume info.') + raise OSError('Error 
parsing volume info.') if key in ('valid', 'filename'): volume_info[key] = pair[1].strip() elif key == 'volume': diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 6082d3739b..6fbc9245a7 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -154,7 +154,7 @@ class GiftiNVPairs: 'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object ' 'as a dict, instead.', '4.0', '6.0') - def __init__(self, name=u'', value=u''): + def __init__(self, name='', value=''): self._name = name self._value = value self._container = None @@ -480,7 +480,7 @@ def __init__(self, coordsys=None, ordering="C", meta=None, - ext_fname=u'', + ext_fname='', ext_offset=0): """ Returns a shell object that cannot be saved. @@ -702,7 +702,7 @@ class GiftiImage(xml.XmlSerializable, SerializableImage): parser = None def __init__(self, header=None, extra=None, file_map=None, meta=None, - labeltable=None, darrays=None, version=u"1.0"): + labeltable=None, darrays=None, version="1.0"): super(GiftiImage, self).__init__(header=header, extra=extra, file_map=file_map) if darrays is None: diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index e4fbc625ab..56d7d56946 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -83,7 +83,7 @@ def read_mosaic_dir(dicom_path, gradients = [] arrays = [] if len(filenames) == 0: - raise IOError(f'Found no files with "{full_globber}"') + raise OSError(f'Found no files with "{full_globber}"') for fname in filenames: dcm_w = wrapper_from_file(fname, **dicom_kwargs) # Because the routine sorts by filename, it only makes sense to use diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index 0ce7f8de2e..b1ae9edae9 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -30,7 +30,7 @@ def test_read_dwis(): assert_array_almost_equal(aff, EXPECTED_AFFINE) assert_array_almost_equal(bs, (0, EXPECTED_PARAMS[0])) assert_array_almost_equal(gs, (np.zeros((3,)), EXPECTED_PARAMS[1])) - with pytest.raises(IOError): + with pytest.raises(OSError): didr.read_mosaic_dwi_dir('improbable') diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index d434e50c32..1bffac10ce 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -2269,7 +2269,7 @@ def load(filename): ------ ImageFileError if `filename` doesn't look like NIfTI1; - IOError + OSError if `filename` does not exist. """ try: diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 8c58569d96..10e789d076 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -265,7 +265,7 @@ def load(filename): ------ ImageFileError if `filename` doesn't look like nifti2; - IOError + OSError if `filename` does not exist. 
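Several of these pyupgrade rewrites lean on long-standing Python 3 guarantees; a quick sketch of the two most load-bearing ones:

# Since Python 3.3, IOError has been a built-in alias of OSError, so
# raising or catching either name is equivalent.
assert IOError is OSError

# `yield from` delegates to a subiterator, replacing loops of the form
# `for elem in it: yield elem`.
def get_axes():
    yield from iter([1, 2])
    yield from iter([3])

assert list(get_axes()) == [1, 2, 3]

The u'' prefixes and explicit (object) bases removed elsewhere in the series are likewise no-ops on Python 3, so none of these hunks change behavior.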
""" try: diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 687a94da5a..b858a5efff 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -278,7 +278,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): file_map, mmap=mmap, keep_file_open=keep_file_open) try: matf = file_map['mat'].get_prepare_fileobj() - except IOError: + except OSError: return ret # Allow for possibility of empty file -> no update to affine with matf: diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 8dfe96f927..8767a2a48f 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -53,7 +53,7 @@ def detect_format(fileobj): try: if format.is_correct_format(fileobj): return format - except IOError: + except OSError: pass if isinstance(fileobj, str): diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 6857fdb1f7..eb296b516f 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -440,7 +440,7 @@ def _count_ImageOpeners(proxy, data, voxels): CountingImageOpener.num_openers = 0 # expected data is defined in the test_keep_file_open_* tests for i in range(voxels.shape[0]): - x, y, z = [int(c) for c in voxels[i, :]] + x, y, z = (int(c) for c in voxels[i, :]) assert proxy[x, y, z] == x * 100 + y * 10 + z return CountingImageOpener.num_openers diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 607345e473..3a48f204d3 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -144,7 +144,7 @@ def test_mlist_errors(self): neworder = [frames_order[x][0] for x in sorted(frames_order)] assert neworder == [1, 2, 3, 4, 5] with suppress_warnings(): - with pytest.raises(IOError): + with pytest.raises(OSError): get_series_framenumbers(mlist) diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index f2cf0242d5..c58b95d8e8 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -139,7 +139,7 @@ def test_read_img_data_nifti(): img.file_map = img.filespec_to_file_map(froot) # Trying to read from this filemap will generate an error because # we are going to read from files that do not exist - with pytest.deprecated_call(), pytest.raises(IOError): + with pytest.deprecated_call(), pytest.raises(OSError): read_img_data(img) img.to_file_map() # Load - now the scaling and offset correctly applied diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 4994f94e48..6babe649fb 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -165,16 +165,14 @@ def test_array_from_file(): arr = array_from_file((0,), np.dtype('f8'), BytesIO()) assert len(arr) == 0 # Check error from small file - with pytest.raises(IOError): + with pytest.raises(OSError): array_from_file(shape, dtype, BytesIO()) # check on real file fd, fname = tempfile.mkstemp() with InTemporaryDirectory(): open(fname, 'wb').write(b'1') in_buf = open(fname, 'rb') - # For windows this will raise a WindowsError from mmap, Unices - # appear to raise an IOError - with pytest.raises(Exception): + with pytest.raises(OSError): array_from_file(shape, dtype, in_buf) del in_buf @@ -983,7 +981,7 @@ def test_seek_tell(): assert fobj.tell() == 10 seek_tell(fobj, 10) assert fobj.tell() == 10 - with pytest.raises(IOError): + with pytest.raises(OSError): seek_tell(fobj, 5) # Make sure read seeks don't affect file with ImageOpener(in_file, 'rb') as fobj: @@ -1003,10 +1001,10 
@@ def test_seek_tell_logic(): class BabyBio(BytesIO): def seek(self, *args): - raise IOError() + raise OSError() bio = BabyBio() # Fresh fileobj, position 0, can't seek - error - with pytest.raises(IOError): + with pytest.raises(OSError): bio.seek(10) # Put fileobj in correct position by writing ZEROB = b'\x00' @@ -1015,7 +1013,7 @@ def seek(self, *args): assert bio.tell() == 10 assert bio.getvalue() == ZEROB * 10 # Try write zeros to get to new position - with pytest.raises(IOError): + with pytest.raises(OSError): bio.seek(20) seek_tell(bio, 20, write0=True) assert bio.getvalue() == ZEROB * 20 @@ -1206,7 +1204,7 @@ def read(self, n_bytes): return b'' try: array_from_file(shape, np.int8, NoStringIO()) - except IOError as err: + except OSError as err: message = str(err) assert message == ("Expected 11390625000000000000 bytes, got 0 " "bytes from object\n - could the file be damaged?") diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index dc82287dbb..4e22c6ce29 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -465,7 +465,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): n_read = len(data_bytes) needs_copy = True if n_bytes != n_read: - raise IOError(f"Expected {n_bytes} bytes, got {n_read} bytes from " + raise OSError(f"Expected {n_bytes} bytes, got {n_read} bytes from " f"{getattr(infile, 'name', 'object')}\n - could the file be damaged?") arr = np.ndarray(shape, in_dtype, buffer=data_bytes, order=order) if needs_copy: @@ -822,7 +822,7 @@ def seek_tell(fileobj, offset, write0=False): Parameters ---------- fileobj : file-like - object implementing ``seek`` and (if seek raises an IOError) ``tell`` + object implementing ``seek`` and (if seek raises an OSError) ``tell`` offset : int position in file to which to seek write0 : {False, True}, optional @@ -832,16 +832,16 @@ def seek_tell(fileobj, offset, write0=False): """ try: fileobj.seek(offset) - except IOError as e: + except OSError as e: # This can be a negative seek in write mode for gz file object or any # seek in write mode for a bz2 file object pos = fileobj.tell() if pos == offset: return if not write0: - raise IOError(str(e)) + raise OSError(str(e)) if pos > offset: - raise IOError("Can't write to seek backwards") + raise OSError("Can't write to seek backwards") fileobj.write(b'\x00' * (offset - pos)) assert fileobj.tell() == offset From efb8ce905960fb6b86e1584b2fc763758aea7046 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 13 Dec 2022 07:32:25 +0100 Subject: [PATCH 070/702] BF: Fix error message --- nibabel/ecat.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 1d7bddaa16..494ea95e2d 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -611,8 +611,7 @@ def _get_oriented_data(self, raw_data, orientation=None): elif orientation == 'radiological': orientation = patient_orient_radiological[0] else: - raise ValueError('orientation should be None,\ - neurological or radiological') + raise ValueError('orientation should be None, neurological or radiological') if orientation in patient_orient_neurological: raw_data = raw_data[::-1, ::-1, ::-1] From 3232a8af89392c989ff9d4ede5af34a6ebe077ed Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 7 Nov 2022 09:48:07 -0500 Subject: [PATCH 071/702] CI: Check Python 3.11 for pydicom@master --- .github/workflows/pre-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 4979713fe0..a8bc85a018 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -36,7 +36,7 @@ jobs: include: # Pydicom master - os: ubuntu-latest - python-version: "3.10" + python-version: "3.11" install: pip check: test pip-flags: '' From 40197a28e4bc5bb5e2f593da8505412d6beda2c0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 15 Dec 2022 20:35:58 -0500 Subject: [PATCH 072/702] CI: Test on Python 3.11 --- .github/workflows/pre-release.yml | 2 +- .github/workflows/stable.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index a8bc85a018..dd8d15c7e6 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -26,7 +26,7 @@ jobs: strategy: matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: ["3.9", "3.10"] + python-version: ["3.9", "3.10", "3.11"] architecture: ['x64', 'x86'] install: ['pip'] check: ['test'] diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 9086dab9d9..ace5fa2622 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -84,7 +84,7 @@ jobs: strategy: matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: [3.7, 3.8, 3.9, "3.10"] + python-version: [3.7, 3.8, 3.9, "3.10", "3.11"] architecture: ['x64', 'x86'] install: ['pip'] check: ['test'] From 0f921b0674bb4732f5267f154def820de0a2fccc Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 15 Dec 2022 20:36:12 -0500 Subject: [PATCH 073/702] CI: Add concurrency groups and permissions limitations --- .github/workflows/pre-release.yml | 7 +++++++ .github/workflows/stable.yml | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index dd8d15c7e6..9ceb4033ae 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -18,6 +18,13 @@ defaults: run: shell: bash +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + jobs: pre-release: # Check pre-releases of dependencies on stable Python diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index ace5fa2622..7cf7aaab43 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -24,6 +24,13 @@ defaults: run: shell: bash +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + jobs: build: runs-on: ubuntu-latest From 0a0582d138b2f7318cdf1fadb93c41029042967b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 15 Dec 2022 22:20:46 -0500 Subject: [PATCH 074/702] TEST: Tolerate distributing tests across workers --- nibabel/tests/test_api_validators.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index c5e19b268b..f1c592ce13 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -1,6 +1,7 @@ """ Metaclass and class for validating instance APIs """ - +import os +import pytest class validator2test(type): @@ -82,6 +83,10 @@ def validate_something(self, obj, params): assert obj.get_var() == params['var'] +@pytest.mark.xfail( + os.getenv("PYTEST_XDIST_WORKER") is not None, + reason="Execution in the same 
scope cannot be guaranteed" +) class TestRunAllTests(ValidateAPI): """ Class to test that each validator test gets run @@ -98,7 +103,7 @@ def validate_first(self, obj, param): def validate_second(self, obj, param): self.run_tests.append('second') - -def teardown(): - # Check that both validate_xxx tests got run - assert TestRunAllTests.run_tests == ['first', 'second'] + @classmethod + def teardown_class(cls): + # Check that both validate_xxx tests got run + assert cls.run_tests == ['first', 'second'] From 17b0ef0c16f2366e3b30a7891d6bb5e2c2a18fdd Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 15 Dec 2022 22:21:55 -0500 Subject: [PATCH 075/702] CI: Run tests with available parallelism --- setup.cfg | 1 + tools/ci/check.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 03e6e41171..ef1c140a29 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,6 +24,7 @@ test = pytest-cov pytest-doctestplus pytest-httpserver + pytest-xdist zstd = pyzstd >= 0.14.3 all = diff --git a/tools/ci/check.sh b/tools/ci/check.sh index e497833521..a96f0874a4 100755 --- a/tools/ci/check.sh +++ b/tools/ci/check.sh @@ -25,7 +25,7 @@ elif [ "${CHECK_TYPE}" == "test" ]; then cd for_testing cp ../.coveragerc . pytest --doctest-modules --doctest-plus --cov nibabel --cov-report xml \ - --junitxml=test-results.xml -v --pyargs nibabel + --junitxml=test-results.xml -v --pyargs nibabel -n auto else false fi From 2f94071547caa3db6017c7a72fe4b9d902271299 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:17:02 -0400 Subject: [PATCH 076/702] MNT: Set fallback version to 5.0.0.dev0 --- nibabel/info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/info.py b/nibabel/info.py index ad5d473f74..38690246c3 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -10,8 +10,8 @@ # This is a fall-back for versioneer when installing from a git archive. # This should be set to the intended next version + dev to indicate a # development (pre-release) version. -_version_major = 4 -_version_minor = 1 +_version_major = 5 +_version_minor = 0 _version_micro = 0 _version_extra = '.dev0' From 89929bbd0e52895962073262c5ea2f903750fd60 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:18:23 -0400 Subject: [PATCH 077/702] MNT: Remove nibabel.keywordonly --- nibabel/conftest.py | 2 -- nibabel/keywordonly.py | 34 --------------------- nibabel/tests/test_keywordonly.py | 49 ------------------------------- 3 files changed, 85 deletions(-) delete mode 100644 nibabel/keywordonly.py delete mode 100644 nibabel/tests/test_keywordonly.py diff --git a/nibabel/conftest.py b/nibabel/conftest.py index 751afd9654..697eef4ad6 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -1,8 +1,6 @@ import pytest # Pre-load deprecated modules to avoid cluttering warnings -with pytest.deprecated_call(): - import nibabel.keywordonly with pytest.warns(FutureWarning): import nibabel.py3k diff --git a/nibabel/keywordonly.py b/nibabel/keywordonly.py deleted file mode 100644 index b6267d5010..0000000000 --- a/nibabel/keywordonly.py +++ /dev/null @@ -1,34 +0,0 @@ -""" Decorator for labeling keyword arguments as keyword only -""" - -from functools import wraps -import warnings - -warnings.warn("We will remove the 'keywordonly' module from nibabel 5.0. 
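The replacement the warning recommends is PEP 3102 keyword-only syntax; a minimal sketch with a hypothetical function, not a nibabel signature:

# A bare `*` in the signature makes every later parameter keyword-only,
# the compile-time equivalent of the old kw_only_func(1) runtime check.
def resample(image, *, order=3, mode='constant'):
    return image, order, mode

resample('img.nii', order=1)    # fine
try:
    resample('img.nii', 1)      # a second positional argument now fails
except TypeError:
    pass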
" - "Please use the built-in Python `*` argument to ensure " - "keyword-only parameters (see PEP 3102).", - DeprecationWarning, - stacklevel=2) - - -def kw_only_func(n): - """ Return function decorator enforcing maximum of `n` positional arguments - """ - def decorator(func): - @wraps(func) - def wrapper(*args, **kwargs): - if len(args) > n: - raise TypeError( - f"{func.__name__} takes at most {n} positional argument{'s' if n > 1 else ''}") - return func(*args, **kwargs) - return wrapper - return decorator - - -def kw_only_meth(n): - """ Return method decorator enforcing maximum of `n` positional arguments - - The method has at least one positional argument ``self`` or ``cls``; allow - for that. - """ - return kw_only_func(n + 1) diff --git a/nibabel/tests/test_keywordonly.py b/nibabel/tests/test_keywordonly.py deleted file mode 100644 index 0ef631dbf4..0000000000 --- a/nibabel/tests/test_keywordonly.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Test kw_only decorators """ - -from ..keywordonly import kw_only_func, kw_only_meth - -import pytest - - -def test_kw_only_func(): - # Test decorator - def func(an_arg): - "My docstring" - return an_arg - assert func(1) == 1 - with pytest.raises(TypeError): - func(1, 2) - dec_func = kw_only_func(1)(func) - assert dec_func(1) == 1 - with pytest.raises(TypeError): - dec_func(1, 2) - with pytest.raises(TypeError): - dec_func(1, akeyarg=3) - assert dec_func.__doc__ == 'My docstring' - - @kw_only_func(1) - def kw_func(an_arg, a_kwarg='thing'): - "Another docstring" - return an_arg, a_kwarg - assert kw_func(1) == (1, 'thing') - with pytest.raises(TypeError): - kw_func(1, 2) - assert kw_func(1, a_kwarg=2) == (1, 2) - with pytest.raises(TypeError): - kw_func(1, akeyarg=3) - assert kw_func.__doc__ == 'Another docstring' - - class C: - - @kw_only_meth(1) - def kw_meth(self, an_arg, a_kwarg='thing'): - "Method docstring" - return an_arg, a_kwarg - c = C() - assert c.kw_meth(1) == (1, 'thing') - with pytest.raises(TypeError): - c.kw_meth(1, 2) - assert c.kw_meth(1, a_kwarg=2) == (1, 2) - with pytest.raises(TypeError): - c.kw_meth(1, akeyarg=3) - assert c.kw_meth.__doc__ == 'Method docstring' From 494ad70c972701cd7e8d2dc85d3fc90f51a09b68 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:28:34 -0400 Subject: [PATCH 078/702] TEST: Bump some final removals to 6.0 --- nibabel/tests/test_removalschedule.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index d94b1d0f9d..0595e48360 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -17,10 +17,10 @@ ("6.0.0", [("nibabel.loadsave", "guessed_image_type"), ("nibabel.loadsave", "read_img_data"), ("nibabel.orientations", "flip_axis"), - ]), - ("5.0.0", [("nibabel.pydicom_compat", "dicom_test"), + ("nibabel.pydicom_compat", "dicom_test"), ("nibabel.onetime", "setattr_on_read"), - ("nibabel.gifti.gifti", "data_tag"), + ]), + ("5.0.0", [("nibabel.gifti.gifti", "data_tag"), ("nibabel.gifti.giftiio", "read"), ("nibabel.gifti.giftiio", "write"), ("nibabel.gifti.parse_gifti_fast", "Outputter"), From 3aaf8d14d0ddc770f566f0a28921e4aebfd5dc76 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 2 Oct 2022 20:28:58 -0400 Subject: [PATCH 079/702] MNT: Remove gifti.gifti.data_tag and accommodating hacks --- nibabel/gifti/gifti.py | 22 +--------------------- nibabel/gifti/tests/test_gifti.py | 6 ------ 2 files changed, 1 insertion(+), 27 deletions(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 6fbc9245a7..adc7f83b56 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -378,21 +378,6 @@ def print_summary(self): print('Affine Transformation Matrix: \n', self.xform) -@deprecate_with_version( - "data_tag is an internal API that will be discontinued.", - '2.1', '4.0') -def data_tag(dataarray, encoding, datatype, ordering): - class DataTag(xml.XmlSerializable): - - def __init__(self, *args): - self.args = args - - def _to_xml_element(self): - return _data_tag_element(*self.args) - - return DataTag(dataarray, encoding, datatype, ordering).to_xml() - - def _data_tag_element(dataarray, encoding, dtype, ordering): """ Creates data tag with given `encoding`, returns as XML element """ @@ -400,13 +385,8 @@ def _data_tag_element(dataarray, encoding, dtype, ordering): order = array_index_order_codes.npcode[ordering] enclabel = gifti_encoding_codes.label[encoding] if enclabel == 'ASCII': - # XXX Accommodating data_tag API - # On removal (nibabel 4.0) drop str case - da = _arr2txt(dataarray, dtype if isinstance(dtype, str) else KIND2FMT[dtype.kind]) + da = _arr2txt(dataarray, KIND2FMT[dtype.kind]) elif enclabel in ('B64BIN', 'B64GZ'): - # XXX Accommodating data_tag API - don't try to fix dtype - if isinstance(dtype, str): - dtype = dataarray.dtype out = np.asanyarray(dataarray, dtype).tobytes(order) if enclabel == 'B64GZ': out = zlib.compress(out) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 8249d01f92..31b930b7cd 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -12,7 +12,6 @@ from .. import (GiftiImage, GiftiDataArray, GiftiLabel, GiftiLabelTable, GiftiMetaData, GiftiNVPairs, GiftiCoordSystem) -from ..gifti import data_tag from ...nifti1 import data_type_codes from ...fileholders import FileHolder from ...deprecator import ExpiredDeprecationError @@ -376,11 +375,6 @@ def test_gifti_coord(): gcs.to_xml() -def test_data_tag_deprecated(): - with pytest.raises(ExpiredDeprecationError): - data_tag(np.array([]), 'ASCII', '%i', 1) - - def test_gifti_round_trip(): # From section 14.4 in GIFTI Surface Data Format Version 1.0 # (with some adaptations) From eeaa4ba9de442791de005e6b351d904b3cc6658e Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:33:55 -0400 Subject: [PATCH 080/702] MNT: Complete various GIFTI removals --- nibabel/gifti/gifti.py | 152 ------------------- nibabel/gifti/tests/test_gifti.py | 40 ----- nibabel/gifti/tests/test_parse_gifti_fast.py | 27 ---- nibabel/tests/test_removalschedule.py | 2 - 4 files changed, 221 deletions(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index adc7f83b56..31df1d813e 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -122,13 +122,6 @@ def data(self): def from_dict(klass, data_dict): return klass(data_dict) - @deprecate_with_version( - 'get_metadata method deprecated. ' - "Use the metadata property instead.", - '2.1', '4.0') - def get_metadata(self): - return dict(self) - @property @deprecate_with_version( 'metadata property deprecated. 
Use GiftiMetaData object ' @@ -274,13 +267,6 @@ def __repr__(self): r, g, b, a = chars.astype('u1') return f'' - @deprecate_with_version( - 'get_rgba method deprecated. ' - "Use the rgba property instead.", - '2.1', '4.0') - def get_rgba(self): - return self.rgba - @property def rgba(self): """ Returns RGBA as tuple """ @@ -488,68 +474,6 @@ def __repr__(self): def num_dim(self): return len(self.dims) - # Setter for backwards compatibility with pymvpa - @num_dim.setter - @deprecate_with_version( - "num_dim will be read-only in future versions of nibabel", - '2.1', '4.0') - def num_dim(self, value): - if value != len(self.dims): - raise ValueError(f'num_dim value {value} != number of ' - f'dimensions len(self.dims) {len(self.dims)}') - - @classmethod - @deprecate_with_version( - 'from_array method is deprecated. ' - 'Please use GiftiDataArray constructor instead.', - '2.1', '4.0') - def from_array(klass, - darray, - intent="NIFTI_INTENT_NONE", - datatype=None, - encoding="GIFTI_ENCODING_B64GZ", - endian=sys.byteorder, - coordsys=None, - ordering="C", - meta=None): - """ Creates a new Gifti data array - - Parameters - ---------- - darray : ndarray - NumPy data array - intent : string - NIFTI intent code, see nifti1.intent_codes - datatype : None or string, optional - NIFTI data type codes, see nifti1.data_type_codes - If None, the datatype of the NumPy array is taken. - encoding : string, optionaal - Encoding of the data, see util.gifti_encoding_codes; - default: GIFTI_ENCODING_B64GZ - endian : string, optional - The Endianness to store the data array. Should correspond to the - machine endianness. default: system byteorder - coordsys : GiftiCoordSystem, optional - If None, a identity transformation is taken. - ordering : string, optional - The ordering of the array. see util.array_index_order_codes; - default: RowMajorOrder - C ordering - meta : None or dict, optional - A dictionary for metadata information. If None, gives empty dict. - - Returns - ------- - da : instance of our own class - """ - return klass(data=darray, - intent=intent, - datatype=datatype, - encoding=encoding, - endian=endian, - coordsys=coordsys, - ordering=ordering, - meta=meta) - def _to_xml_element(self): # fix endianness to machine endianness self.endian = gifti_endian_codes.code[sys.byteorder] @@ -580,40 +504,6 @@ def _to_xml_element(self): return data_array - @deprecate_with_version( - 'to_xml_open method deprecated. ' - 'Use the to_xml() function instead.', - '2.1', '4.0') - def to_xml_open(self): - out = """\n""" - di = "" - for i, n in enumerate(self.dims): - di = di + f'\tDim{i}="{n}\"\n' - return out % (intent_codes.niistring[self.intent], - data_type_codes.niistring[self.datatype], - array_index_order_codes.label[self.ind_ord], - str(self.num_dim), - str(di), - gifti_encoding_codes.specs[self.encoding], - gifti_endian_codes.specs[self.endian], - self.ext_fname, - self.ext_offset, - ) - - @deprecate_with_version( - 'to_xml_close method deprecated. ' - 'Use the to_xml() function instead.', - '2.1', '4.0') - def to_xml_close(self): - return "\n" - def print_summary(self): print('Intent: ', intent_codes.niistring[self.intent]) print('DataType: ', data_type_codes.niistring[self.datatype]) @@ -630,13 +520,6 @@ def print_summary(self): print('Coordinate System:') print(self.coordsys.print_summary()) - @deprecate_with_version( - 'get_metadata method deprecated. 
' - "Use the metadata property instead.", - '2.1', '4.0') - def get_metadata(self): - return dict(self.meta) - @property def metadata(self): """ Returns metadata as dictionary """ @@ -718,20 +601,6 @@ def labeltable(self, labeltable): raise TypeError("Not a valid GiftiLabelTable instance") self._labeltable = labeltable - @deprecate_with_version( - 'set_labeltable method deprecated. ' - "Use the gifti_img.labeltable property instead.", - '2.1', '4.0') - def set_labeltable(self, labeltable): - self.labeltable = labeltable - - @deprecate_with_version( - 'get_labeltable method deprecated. ' - "Use the gifti_img.labeltable property instead.", - '2.1', '4.0') - def get_labeltable(self): - return self.labeltable - @property def meta(self): return self._meta @@ -748,20 +617,6 @@ def meta(self, meta): raise TypeError("Not a valid GiftiMetaData instance") self._meta = meta - @deprecate_with_version( - 'set_meta method deprecated. ' - "Use the gifti_img.meta property instead.", - '2.1', '4.0') - def set_metadata(self, meta): - self.meta = meta - - @deprecate_with_version( - 'get_meta method deprecated. ' - "Use the gifti_img.meta property instead.", - '2.1', '4.0') - def get_meta(self): - return self.meta - def add_gifti_data_array(self, dataarr): """ Adds a data array to the GiftiImage @@ -925,13 +780,6 @@ def agg_data(self, intent_code=None): return all_data - @deprecate_with_version( - 'getArraysFromIntent method deprecated. ' - "Use get_arrays_from_intent instead.", - '2.1', '4.0') - def getArraysFromIntent(self, intent): - return self.get_arrays_from_intent(intent) - def print_summary(self): print('----start----') print('Source filename: ', self.get_filename()) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 31b930b7cd..1fa4eb8917 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -14,7 +14,6 @@ GiftiCoordSystem) from ...nifti1 import data_type_codes from ...fileholders import FileHolder -from ...deprecator import ExpiredDeprecationError from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest @@ -182,32 +181,6 @@ def test_dataarray_init(): assert gda(ext_offset=12).ext_offset == 12 -def test_dataarray_from_array(): - with pytest.raises(ExpiredDeprecationError): - GiftiDataArray.from_array(np.ones((3, 4))) - - -def test_to_xml_open_close_deprecations(): - # Smoke test on deprecated functions - da = GiftiDataArray(np.ones((1,)), 'triangle') - with pytest.raises(ExpiredDeprecationError): - da.to_xml_open() - with pytest.raises(ExpiredDeprecationError): - da.to_xml_close() - - -def test_num_dim_deprecation(): - da = GiftiDataArray(np.ones((2, 3, 4))) - # num_dim is property, set automatically from len(da.dims) - assert da.num_dim == 3 - # setting num_dim to correct value is deprecated - with pytest.raises(ExpiredDeprecationError): - da.num_dim = 3 - # setting num_dim to incorrect value is also deprecated - with pytest.raises(ExpiredDeprecationError): - da.num_dim = 4 - - def test_labeltable(): img = GiftiImage() assert len(img.labeltable.labels) == 0 @@ -217,12 +190,6 @@ def test_labeltable(): img.labeltable = new_table assert len(img.labeltable.labels) == 2 - # Test deprecations - with pytest.raises(ExpiredDeprecationError): - newer_table = GiftiLabelTable() - newer_table.labels += ['test', 'me', 'again'] - img.set_labeltable(newer_table) - def test_metadata(): md = GiftiMetaData(key='value') @@ -240,9 +207,6 @@ def test_metadata(): assert md.data[0].name == 'key' assert md.data[0].value == 
'value' assert len(w) == 2 - # Test deprecation - with pytest.raises(ExpiredDeprecationError): - md.get_metadata() def test_metadata_list_interface(): @@ -347,10 +311,6 @@ def assign_rgba(gl, val): pytest.raises(ValueError, assign_rgba, gl3, rgba[:2]) pytest.raises(ValueError, assign_rgba, gl3, rgba.tolist() + rgba.tolist()) - # Test deprecation - with pytest.raises(ExpiredDeprecationError): - gl3.get_rgba() - # Test default value gl4 = GiftiLabel() assert len(gl4.rgba) == 4 diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index 14a576d25b..a02761027a 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -185,18 +185,6 @@ def test_load_metadata(): assert img.version == '1.0' -def test_metadata_deprecations(): - img = load(datafiles[0]) - me = img.meta - - # Test deprecation - with pytest.raises(ExpiredDeprecationError): - img.get_meta() - - with pytest.raises(ExpiredDeprecationError): - img.set_metadata(me) - - def test_load_dataarray1(): img1 = load(DATA_FILE1) # Round trip @@ -319,9 +307,6 @@ def test_load_getbyintent(): da = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET") assert len(da) == 1 - with pytest.raises(ExpiredDeprecationError): - img.getArraysFromIntent("NIFTI_INTENT_POINTSET") - da = img.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE") assert len(da) == 1 @@ -349,18 +334,6 @@ def test_load_labeltable(): assert img.labeltable.labels[1].alpha == 1 -def test_labeltable_deprecations(): - img = load(DATA_FILE6) - lt = img.labeltable - - # Test deprecation - with pytest.raises(ExpiredDeprecationError): - img.get_labeltable() - - with pytest.raises(ExpiredDeprecationError): - img.set_labeltable(lt) - - def test_parse_dataarrays(): fn = 'bad_daa.gii' img = gi.GiftiImage() diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 0595e48360..abafe4a1a7 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -47,7 +47,6 @@ ]), ("5.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_data"), ("nibabel.freesurfer.mghformat", "MGHHeader", "_header_data"), - ("nibabel.gifti.gifti", "GiftiDataArray", "num_dim"), ("nibabel.gifti.gifti", "GiftiDataArray", "from_array"), ("nibabel.gifti.gifti", "GiftiDataArray", "to_xml_open"), ("nibabel.gifti.gifti", "GiftiDataArray", "to_xml_close"), @@ -57,7 +56,6 @@ ("nibabel.gifti.gifti", "GiftiImage", "get_metadata"), ("nibabel.gifti.gifti", "GiftiImage", "set_metadata"), ("nibabel.gifti.gifti", "GiftiImage", "getArraysFromIntent"), - ("nibabel.gifti.gifti", "GiftiImage", "getArraysFromIntent"), ("nibabel.gifti.gifti", "GiftiMetaData", "get_metadata"), ("nibabel.gifti.gifti", "GiftiLabel", "get_rgba"), ("nibabel.nicom.dicomwrappers", "Wrapper", "get_affine"), From 1b7a4a02afc932b25963b22e0a8a165118ec8e7e Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 2 Oct 2022 20:36:45 -0400 Subject: [PATCH 081/702] MNT: Remove imagclasses.{ext,class}_map --- nibabel/__init__.py | 2 +- nibabel/imageclasses.py | 88 ------------------------------ nibabel/tests/test_imageclasses.py | 11 +--- 3 files changed, 2 insertions(+), 99 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 777cd575ff..152710cccf 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -61,7 +61,7 @@ from .orientations import (io_orientation, orientation_affine, flip_axis, OrientationError, apply_orientation, aff2axcodes) -from .imageclasses import class_map, ext_map, all_image_classes +from .imageclasses import all_image_classes from . import mriutils from . import streamlines from . import viewers diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index f657822977..6b26ac0c05 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -20,12 +20,6 @@ from .parrec import PARRECImage from .spm99analyze import Spm99AnalyzeImage from .spm2analyze import Spm2AnalyzeImage -from .volumeutils import Recoder -from .deprecated import deprecate_with_version - -from .optpkg import optional_package -_, have_scipy, _ = optional_package('scipy') - # Ordered by the load/save priority. all_image_classes = [Nifti1Pair, Nifti1Image, Nifti2Pair, @@ -34,88 +28,6 @@ Minc1Image, Minc2Image, MGHImage, PARRECImage, GiftiImage, AFNIImage] - -# DEPRECATED: mapping of names to classes and class functionality -class ClassMapDict(dict): - - @deprecate_with_version('class_map is deprecated.', - '2.1', '4.0') - def __getitem__(self, *args, **kwargs): - return super(ClassMapDict, self).__getitem__(*args, **kwargs) - - -class_map = ClassMapDict( - analyze={'class': AnalyzeImage, # Image class - 'ext': '.img', # characteristic image extension - 'has_affine': False, # class can store an affine - 'makeable': True, # empty image can be easily made in memory - 'rw': True}, # image can be written - spm99analyze={'class': Spm99AnalyzeImage, - 'ext': '.img', - 'has_affine': True, - 'makeable': True, - 'rw': have_scipy}, - spm2analyze={'class': Spm2AnalyzeImage, - 'ext': '.img', - 'has_affine': True, - 'makeable': True, - 'rw': have_scipy}, - nifti_pair={'class': Nifti1Pair, - 'ext': '.img', - 'has_affine': True, - 'makeable': True, - 'rw': True}, - nifti_single={'class': Nifti1Image, - 'ext': '.nii', - 'has_affine': True, - 'makeable': True, - 'rw': True}, - minc={'class': Minc1Image, - 'ext': '.mnc', - 'has_affine': True, - 'makeable': True, - 'rw': False}, - mgh={'class': MGHImage, - 'ext': '.mgh', - 'has_affine': True, - 'makeable': True, - 'rw': True}, - mgz={'class': MGHImage, - 'ext': '.mgz', - 'has_affine': True, - 'makeable': True, - 'rw': True}, - par={'class': PARRECImage, - 'ext': '.par', - 'has_affine': True, - 'makeable': False, - 'rw': False}, - afni={'class': AFNIImage, - 'ext': '.brik', - 'has_affine': True, - 'makeable': False, - 'rw': False}) - - -class ExtMapRecoder(Recoder): - - @deprecate_with_version('ext_map is deprecated.', - '2.1', '4.0') - def __getitem__(self, *args, **kwargs): - return super(ExtMapRecoder, self).__getitem__(*args, **kwargs) - - -# mapping of extensions to default image class names -ext_map = ExtMapRecoder(( - ('nifti_single', '.nii'), - ('nifti_pair', '.img', '.hdr'), - ('minc', '.mnc'), - ('mgh', '.mgh'), - ('mgz', '.mgz'), - ('par', '.par'), - ('brik', '.brik') -)) - # Image classes known to require spatial axes to be first in index ordering. 
# When adding an image class, consider whether the new class should be listed # here. diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 43096e4347..601414e012 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -14,10 +14,9 @@ from nibabel.nifti2 import Nifti2Image from nibabel import imageclasses -from nibabel.imageclasses import spatial_axes_first, class_map, ext_map +from nibabel.imageclasses import spatial_axes_first from nibabel.optpkg import optional_package -from nibabel.deprecator import ExpiredDeprecationError have_h5py = optional_package('h5py')[1] @@ -49,11 +48,3 @@ def test_spatial_axes_first(): img = nib.load(pjoin(DATA_DIR, fname)) assert len(img.shape) == 4 assert not spatial_axes_first(img) - - -def test_deprecations(): - with pytest.raises(ExpiredDeprecationError): - class_map['nifti_single'] - with pytest.raises(ExpiredDeprecationError): - nifti_ext = ext_map['.nii'] - From 7dd75f83b9d549206562aedc4511a093efe6859f Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:37:51 -0400 Subject: [PATCH 082/702] MNT: Remove loadsave.which_analyze_type --- nibabel/loadsave.py | 42 --------------------------- nibabel/tests/test_image_load_save.py | 9 ------ 2 files changed, 51 deletions(-) diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index ff176f541d..763bf20788 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -282,45 +282,3 @@ def read_img_data(img, prefer='scaled'): if prefer == 'scaled': return hdr.data_from_fileobj(fileobj) return hdr.raw_data_from_fileobj(fileobj) - - -@deprecate_with_version('which_analyze_type deprecated.', '3.2', '4.0') -def which_analyze_type(binaryblock): - """ Is `binaryblock` from NIfTI1, NIfTI2 or Analyze header? - - Parameters - ---------- - binaryblock : bytes - The `binaryblock` is 348 bytes that might be NIfTI1, NIfTI2, Analyze, - or None of the the above. 
- - Returns - ------- - hdr_type : str - * a nifti1 header (pair or single) -> return 'nifti1' - * a nifti2 header (pair or single) -> return 'nifti2' - * an Analyze header -> return 'analyze' - * None of the above -> return None - - Notes - ----- - Algorithm: - - * read in the first 4 bytes from the file as 32-bit int ``sizeof_hdr`` - * if ``sizeof_hdr`` is 540 or byteswapped 540 -> assume nifti2 - * Check for 'ni1', 'n+1' magic -> assume nifti1 - * if ``sizeof_hdr`` is 348 or byteswapped 348 assume Analyze - * Return None - """ - from .nifti1 import header_dtype - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, buffer=binaryblock) - bs_hdr_struct = hdr_struct.byteswap() - sizeof_hdr = hdr_struct['sizeof_hdr'] - bs_sizeof_hdr = bs_hdr_struct['sizeof_hdr'] - if 540 in (sizeof_hdr, bs_sizeof_hdr): - return 'nifti2' - if hdr_struct['magic'] in (b'ni1', b'n+1'): - return 'nifti1' - if 348 in (sizeof_hdr, bs_sizeof_hdr): - return 'analyze' - return None diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 07c8bf8c5d..12a49ecd7d 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -29,7 +29,6 @@ from ..volumeutils import native_code, swapped_code from ..optpkg import optional_package from ..spatialimages import SpatialImage -from ..deprecator import ExpiredDeprecationError from numpy.testing import assert_array_equal, assert_array_almost_equal import pytest @@ -271,14 +270,6 @@ def test_filename_save(): shutil.rmtree(pth) -def test_analyze_detection(): - # Test detection of Analyze, Nifti1 and Nifti2 - # Algorithm is as described in loadsave:which_analyze_type - hdr = Nifti1Header(b'\0' * 348, check=False) - with pytest.raises(ExpiredDeprecationError): - nils.which_analyze_type(hdr.binaryblock) - - def test_guessed_image_type(): # Test whether we can guess the image type from example files with pytest.deprecated_call(): From c185da2d1d3135020765a293b934670be9373913 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 2 Oct 2022 20:39:14 -0400 Subject: [PATCH 083/702] MNT: Remove volumeutils.BinOpener/allopen --- nibabel/tests/test_openers.py | 6 ------ nibabel/tests/test_volumeutils.py | 9 --------- nibabel/volumeutils.py | 32 +------------------------------ 3 files changed, 1 insertion(+), 46 deletions(-) diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 0a687353e3..b25dc2db6d 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -23,7 +23,6 @@ DeterministicGzipFile, ) from ..tmpdirs import InTemporaryDirectory -from ..volumeutils import BinOpener from ..optpkg import optional_package import unittest @@ -114,11 +113,6 @@ def test_Opener_various(): assert fobj.fileno() != 0 -def test_BinOpener(): - with pytest.raises(ExpiredDeprecationError): - BinOpener('test.txt', 'r') - - class MockIndexedGzipFile(GzipFile): def __init__(self, *args, **kwargs): self._drop_handles = kwargs.pop('drop_handles', False) diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 67e776daf1..3e6ba1bab4 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -29,7 +29,6 @@ from ..volumeutils import (array_from_file, _is_compressed_fobj, array_to_file, - allopen, # for backwards compatibility fname_ext_ul_case, write_zeros, seek_tell, @@ -49,7 +48,6 @@ from ..openers import Opener, BZ2File from ..casting import (floor_log2, type_info, OK_FLOATS, shared_range) -from ..deprecator import ExpiredDeprecationError from ..optpkg import optional_package from numpy.testing import (assert_array_almost_equal, @@ -1042,13 +1040,6 @@ def test_fname_ext_ul_case(): assert fname_ext_ul_case('afile.TxT') == 'afile.TxT' -def test_allopen_deprecated(): - # This import into volumeutils is for compatibility. The code is the - # ``openers`` module. - with pytest.raises(ExpiredDeprecationError): - fobj = allopen(__file__) - - def test_shape_zoom_affine(): shape = (3, 5, 7) zooms = (3, 2, 1) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 4e22c6ce29..7f18c20f3f 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -19,8 +19,7 @@ import numpy as np from .casting import shared_range, OK_FLOATS -from .openers import Opener, BZ2File, IndexedGzipFile -from .deprecated import deprecate_with_version +from .openers import BZ2File, IndexedGzipFile from .externals.oset import OrderedSet from .optpkg import optional_package @@ -1337,17 +1336,6 @@ def rec2dict(rec): return dct -class BinOpener(Opener): - """ Deprecated class that used to handle .mgz through specialized logic.""" - __doc__ = Opener.__doc__ - - @deprecate_with_version('BinOpener class deprecated. ' - "Please use Opener class instead.", - '2.1', '4.0') - def __init__(self, *args, **kwargs): - return super(BinOpener, self).__init__(*args, **kwargs) - - def fname_ext_ul_case(fname): """ `fname` with ext changed to upper / lower case if file exists @@ -1378,21 +1366,3 @@ def fname_ext_ul_case(fname): if exists(mod_fname): return mod_fname return fname - - -@deprecate_with_version('allopen is deprecated. ' - 'Please use "Opener" class instead.', - '2.0', '4.0') -def allopen(fileish, *args, **kwargs): - """ Compatibility wrapper for old ``allopen`` function - - Wraps creation of ``Opener`` instance, while picking up module global - ``default_compresslevel``. - - Please see docstring of ``Opener`` for details. 
- """ - - class MyOpener(Opener): - default_compresslevel = default_compresslevel - - return MyOpener(fileish, *args, **kwargs) From 1fb2b4e6b83f2be6f2cc28be4a2ef0b0c04c8b58 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:40:05 -0400 Subject: [PATCH 084/702] MNT: Remove orientations.orientation_affine --- nibabel/__init__.py | 3 +-- nibabel/orientations.py | 8 -------- nibabel/tests/test_orientations.py | 9 +-------- 3 files changed, 2 insertions(+), 18 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 152710cccf..f96e80f0eb 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -58,8 +58,7 @@ from .freesurfer import MGHImage from .funcs import (squeeze_image, concat_images, four_to_three, as_closest_canonical) -from .orientations import (io_orientation, orientation_affine, - flip_axis, OrientationError, +from .orientations import (io_orientation, flip_axis, OrientationError, apply_orientation, aff2axcodes) from .imageclasses import all_image_classes from . import mriutils diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 1cfb07e55f..fab106cab5 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -228,14 +228,6 @@ def inv_ornt_aff(ornt, shape): return np.dot(undo_flip, undo_reorder) -@deprecate_with_version('orientation_affine deprecated. ' - 'Please use inv_ornt_aff instead.', - '3.0', - '4.0') -def orientation_affine(ornt, shape): - return inv_ornt_aff(ornt, shape) - - @deprecate_with_version('flip_axis is deprecated. ' 'Please use numpy.flip instead.', '3.2', diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 22d899c4dc..77b892acbc 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -17,11 +17,9 @@ from ..orientations import (io_orientation, ornt_transform, inv_ornt_aff, flip_axis, apply_orientation, OrientationError, - ornt2axcodes, axcodes2ornt, aff2axcodes, - orientation_affine) + ornt2axcodes, axcodes2ornt, aff2axcodes) from ..affines import from_matvec, to_matvec -from ..deprecator import ExpiredDeprecationError IN_ARRS = [np.eye(4), @@ -355,11 +353,6 @@ def test_inv_ornt_aff(): inv_ornt_aff([[0, 1], [1, -1], [np.nan, np.nan]], (3, 4, 5)) -def test_orientation_affine_deprecation(): - with pytest.raises(ExpiredDeprecationError): - orientation_affine([[0, 1], [1, -1], [2, 1]], (3, 4, 5)) - - def test_flip_axis_deprecation(): a = np.arange(24).reshape((2, 3, 4)) axis = 1 From 60e3a13c90770602d2fab652bd0c81180718afd4 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:42:00 -0400 Subject: [PATCH 085/702] MNT: Complete deprecations in spatialimages --- nibabel/spatialimages.py | 24 ------------------------ nibabel/tests/test_ecat.py | 4 ---- nibabel/tests/test_image_api.py | 9 +-------- nibabel/tests/test_spatialimages.py | 10 +--------- 4 files changed, 2 insertions(+), 45 deletions(-) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index d2e69a0fc5..09744d0149 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -21,9 +21,6 @@ methods: * .get_fdata() - * .get_data() (deprecated, use get_fdata() instead) - * .get_affine() (deprecated, use affine property instead) - * .get_header() (deprecated, use header property instead) * .to_filename(fname) - writes data to filename(s) derived from ``fname``, where the derivation may differ between formats. 
* to_file_map() - save image to files with which the image is already @@ -141,7 +138,6 @@ from .viewers import OrthoSlicer3D from .volumeutils import shape_zoom_affine from .fileslice import canonical_slicers -from .deprecated import deprecate_with_version from .orientations import apply_orientation, inv_ornt_aff @@ -307,17 +303,6 @@ def supported_np_types(obj): return set(supported) -class Header(SpatialHeader): - """Alias for SpatialHeader; kept for backwards compatibility.""" - - @deprecate_with_version('Header class is deprecated.\n' - 'Please use SpatialHeader instead.' - 'instead.', - '2.1', '4.0') - def __init__(self, *args, **kwargs): - super(Header, self).__init__(*args, **kwargs) - - class ImageDataError(Exception): pass @@ -527,15 +512,6 @@ def get_data_dtype(self): def set_data_dtype(self, dtype): self._header.set_data_dtype(dtype) - @deprecate_with_version('get_affine method is deprecated.\n' - 'Please use the ``img.affine`` property ' - 'instead.', - '2.1', '4.0') - def get_affine(self): - """ Get affine from image - """ - return self.affine - @classmethod def from_image(klass, img): """ Class method to create new instance of own class from `img` diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 3a48f204d3..adda5433d1 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -240,10 +240,6 @@ def test_isolation(self): aff[0, 0] = 99 assert not np.all(img.affine == aff) - def test_get_affine_deprecated(self): - with pytest.raises(ExpiredDeprecationError): - self.img.get_affine() - def test_float_affine(self): # Check affines get converted to float img_klass = self.image_class diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 21c7b14086..aa4fa84736 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -502,8 +502,7 @@ def validate_header_shape(self, imaker, params): class AffineMixin: """ Adds test of affine property, method - Add this one if your image has an ``affine`` property. If so, it should - (for now) also have a ``get_affine`` method returning the same result. + Add this one if your image has an ``affine`` property. """ def validate_affine(self, imaker, params): @@ -517,12 +516,6 @@ def validate_affine(self, imaker, params): with pytest.raises(AttributeError): img.affine = np.eye(4) - def validate_affine_deprecated(self, imaker, params): - # Check deprecated affine API - img = imaker() - with pytest.raises(ExpiredDeprecationError): - img.get_affine() - class SerializeMixin: def validate_to_from_stream(self, imaker, params): diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index e7cad0de2c..fc11452151 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -15,7 +15,7 @@ import numpy as np from io import BytesIO -from ..spatialimages import SpatialHeader, SpatialImage, HeaderDataError, Header +from ..spatialimages import SpatialHeader, SpatialImage, HeaderDataError from ..imageclasses import spatial_axes_first import pytest @@ -30,7 +30,6 @@ ) from ..tmpdirs import InTemporaryDirectory -from ..deprecator import ExpiredDeprecationError from .. 
import load as top_load def test_header_init(): @@ -605,10 +604,3 @@ def test_load_mmap(self): func(param1, mmap='rw') with pytest.raises(ValueError): func(param1, mmap='r+') - - -def test_header_deprecated(): - class MyHeader(Header): - pass - with pytest.raises(ExpiredDeprecationError): - MyHeader() From e0386150dae9f28f136f05fcc6c8192cb7a16cf4 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:42:51 -0400 Subject: [PATCH 086/702] MNT: Remove nicom.dicomwrappers.Wrapper.get_affine --- nibabel/nicom/dicomwrappers.py | 9 --------- nibabel/nicom/tests/test_dicomwrappers.py | 3 --- 2 files changed, 12 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index d1ca3ee173..9f180a86a3 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -22,7 +22,6 @@ from .dwiparams import B2q, nearest_pos_semi_def, q2bg from ..openers import ImageOpener from ..onetime import auto_attr as one_time -from ..deprecated import deprecate_with_version pydicom = optional_package("pydicom")[0] @@ -101,7 +100,6 @@ class Wrapper: Methods: - * get_affine() (deprecated, use affine property instead) * get_data() * get_pixel_array() * is_same_series(other) @@ -293,13 +291,6 @@ def get(self, key, default=None): """ Get values from underlying dicom data """ return self.dcm_data.get(key, default) - @deprecate_with_version('get_affine method is deprecated.\n' - 'Please use the ``img.affine`` property ' - 'instead.', - '2.5.1', '4.0') - def get_affine(self): - return self.affine - @property def affine(self): """ Mapping between voxel and DICOM coordinate system diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index fcb3cc1703..d65afc6d27 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -13,7 +13,6 @@ from .. import dicomwrappers as didw from .. import dicomreaders as didr from ...volumeutils import endian_codes -from ...deprecator import ExpiredDeprecationError import pytest from unittest import TestCase @@ -632,8 +631,6 @@ def test_affine(self): # Make sure we find orientation/position/spacing info dw = didw.wrapper_from_file(DATA_FILE_4D) aff = dw.affine - with pytest.raises(ExpiredDeprecationError): - dw.get_affine() @dicom_test def test_data_real(self): From aec9bd65f2e0e8b337ab8f01a1c26663096e2bbf Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:43:42 -0400 Subject: [PATCH 087/702] MNT: Remove ArraySequence.data property --- nibabel/streamlines/array_sequence.py | 12 ------------ nibabel/streamlines/tests/test_array_sequence.py | 6 ------ 2 files changed, 18 deletions(-) diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index 5d40937b1c..cff930aaee 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -5,8 +5,6 @@ import numpy as np -from ..deprecated import deprecate_with_version - MEGABYTE = 1024 * 1024 @@ -154,16 +152,6 @@ def total_nb_rows(self): """ Total number of rows in this array sequence. """ return np.sum(self._lengths) - @property - @deprecate_with_version("'ArraySequence.data' property is deprecated.\n" - "Please use the 'ArraySequence.get_data()' method instead", - '3.0', '4.0') - def data(self): - """ Elements in this array sequence. """ - view = self._data.view() - view.setflags(write=False) - return view - def get_data(self): """ Returns a *copy* of the elements in this array sequence. 
diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index 4a5c21aa2e..b75b4432d3 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -10,7 +10,6 @@ from numpy.testing import assert_array_equal from ..array_sequence import ArraySequence, is_array_sequence, concatenate -from ...deprecator import ExpiredDeprecationError SEQ_DATA = {} @@ -95,11 +94,6 @@ def test_creating_arraysequence_from_list(self): check_arr_seq(ArraySequence(iter(SEQ_DATA['data']), buffer_size), SEQ_DATA['data']) - def test_deprecated_data_attribute(self): - seq = ArraySequence(SEQ_DATA['data']) - with pytest.raises(ExpiredDeprecationError): - seq.data - def test_creating_arraysequence_from_generator(self): gen_1, gen_2 = itertools.tee((e for e in SEQ_DATA['data'])) seq = ArraySequence(gen_1) From f7592083571555a1bc6f86a62aa8535ee3403ead Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:44:13 -0400 Subject: [PATCH 088/702] MNT: Remove EcatImage.from_filespec --- nibabel/ecat.py | 9 --------- nibabel/tests/test_ecat.py | 6 ------ 2 files changed, 15 deletions(-) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 494ea95e2d..54f600f147 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -54,7 +54,6 @@ from .arraywriters import make_array_writer from .wrapstruct import WrapStruct from .fileslice import canonical_slicers, predict_shape, slice2outax -from .deprecated import deprecate_with_version BLOCK_SIZE = 512 @@ -863,14 +862,6 @@ def get_subheaders(self): """get access to subheaders""" return self._subheader - @classmethod - @deprecate_with_version('from_filespec class method is deprecated.\n' - 'Please use the ``from_file_map`` class method ' - 'instead.', - '2.1', '4.0') - def from_filespec(klass, filespec): - return klass.from_filename(filespec) - @staticmethod def _get_fileholders(file_map): """ returns files specific to header and image of the image diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index adda5433d1..9e56fd73c7 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -23,7 +23,6 @@ from ..testing import data_path, suppress_warnings from ..tmpdirs import InTemporaryDirectory -from ..deprecator import ExpiredDeprecationError from . import test_wrapstruct as tws from .test_fileslice import slicer_samples @@ -268,8 +267,3 @@ def test_mlist_regression(self): # Test mlist is as same as for nibabel 1.3.0 assert_array_equal(self.img.get_mlist(), [[16842758, 3, 3011, 1]]) - - -def test_from_filespec_deprecation(): - with pytest.raises(ExpiredDeprecationError): - EcatImage.from_filespec(ecat_file) From 5e3314c337bd037a03f6247f42c1de8d56354bc4 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 2 Oct 2022 20:44:48 -0400 Subject: [PATCH 089/702] MNT: Remove FileBasedImage.get_header --- nibabel/filebasedimages.py | 11 ----------- nibabel/tests/test_image_api.py | 6 ------ 2 files changed, 17 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 5162263a28..17ac3e8180 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -15,7 +15,6 @@ from .filename_parser import (types_filenames, TypesFilenamesError, splitext_addext) from .openers import ImageOpener -from .deprecated import deprecate_with_version class ImageFileError(Exception): @@ -79,7 +78,6 @@ class FileBasedImage: methods: - * get_header() (deprecated, use header property instead) * to_filename(fname) - writes data to filename(s) derived from ``fname``, where the derivation may differ between formats. * to_file_map() - save image to files with which the image is already @@ -208,15 +206,6 @@ def __getitem__(self, key): """ raise TypeError("Cannot slice image objects.") - @deprecate_with_version('get_header method is deprecated.\n' - 'Please use the ``img.header`` property ' - 'instead.', - '2.1', '4.0') - def get_header(self): - """ Get header from image - """ - return self.header - def get_filename(self): """ Fetch the image filename diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index aa4fa84736..680458659d 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -119,12 +119,6 @@ def validate_header(self, imaker, params): with pytest.raises(AttributeError): img.header = hdr - def validate_header_deprecated(self, imaker, params): - # Check deprecated header API - img = imaker() - with pytest.raises(ExpiredDeprecationError): - hdr = img.get_header() - def validate_filenames(self, imaker, params): # Validate the filename, file_map interface From 420ea32a0979c14a6979f1d7b7374a5be50af426 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 2 Oct 2022 20:48:21 -0400 Subject: [PATCH 090/702] MNT: Remove nan2zero argument to ArrayWriter.to_fileobj --- nibabel/arraywriters.py | 29 +++-------------------------- nibabel/tests/test_arraywriters.py | 17 ----------------- 2 files changed, 3 insertions(+), 43 deletions(-) diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index cdbec32fc6..c2bbb2912c 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -34,7 +34,6 @@ def __init__(self, array, out_dtype=None) from .casting import (int_to_float, as_int, int_abs, type_info, floor_exact, best_float, shared_range) from .volumeutils import finite_range, array_to_file -from .deprecator import ExpiredDeprecationError class WriterError(Exception): @@ -185,19 +184,6 @@ def finite_range(self): self._has_nan = has_nan return self._finite_range - def _check_nan2zero(self, nan2zero): - if nan2zero is None: - return - if nan2zero != self._nan2zero: - raise WriterError('Deprecated `nan2zero` argument to `to_fileobj` ' - 'must be same as class value set in __init__') - raise ExpiredDeprecationError( - 'Please remove `nan2zero` from call to `to_fileobj` ' - 'and use in instance __init__ instead.\n' - '* deprecated in version: 2.0\n' - '* Raises ExpiredDeprecationError as of version: 4.0\n' - ) - def _needs_nan2zero(self): """ True if nan2zero check needed for writing array """ return (self._nan2zero and @@ -205,7 +191,7 @@ def _needs_nan2zero(self): self.out_dtype.kind in 'iu' and self.has_nan) - def to_fileobj(self, fileobj, order='F', nan2zero=None): + def to_fileobj(self, fileobj, order='F'): """ Write array into `fileobj` Parameters @@ -213,10 +199,7 @@ def to_fileobj(self, fileobj, order='F', nan2zero=None): fileobj : file-like object order : {'F', 'C'} order (Fortran or C) to which to write array - nan2zero : {None, True, False}, optional, deprecated - Deprecated version of argument to __init__ with same name """ - self._check_nan2zero(nan2zero) array_to_file(self._array, fileobj, self._out_dtype, @@ -362,7 +345,7 @@ def _writing_range(self): return mn, mx return None, None - def to_fileobj(self, fileobj, order='F', nan2zero=None): + def to_fileobj(self, fileobj, order='F'): """ Write array into `fileobj` Parameters @@ -370,10 +353,7 @@ def to_fileobj(self, fileobj, order='F', nan2zero=None): fileobj : file-like object order : {'F', 'C'} order (Fortran or C) to which to write array - nan2zero : {None, True, False}, optional, deprecated - Deprecated version of argument to __init__ with same name """ - self._check_nan2zero(nan2zero) mn, mx = self._writing_range() array_to_file(self._array, fileobj, @@ -536,7 +516,7 @@ def _set_inter(self, val): self._inter = np.squeeze(self.scaler_dtype.type(val)) inter = property(_get_inter, _set_inter, None, 'get/set inter') - def to_fileobj(self, fileobj, order='F', nan2zero=None): + def to_fileobj(self, fileobj, order='F'): """ Write array into `fileobj` Parameters @@ -544,10 +524,7 @@ def to_fileobj(self, fileobj, order='F', nan2zero=None): fileobj : file-like object order : {'F', 'C'} order (Fortran or C) to which to write array - nan2zero : {None, True, False}, optional, deprecated - Deprecated version of argument to __init__ with same name """ - self._check_nan2zero(nan2zero) mn, mx = self._writing_range() array_to_file(self._array, fileobj, diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index df50b4cd6b..de55cd334b 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -13,7 +13,6 
@@ make_array_writer, get_slope_inter) from ..casting import int_abs, type_info, shared_range, on_powerpc from ..volumeutils import array_from_file, apply_read_scaling, _dt_min_max -from ..deprecator import ExpiredDeprecationError from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest @@ -506,14 +505,6 @@ def test_nan2zero(): aw = awt(arr, np.float32, **kwargs) data_back = round_trip(aw) assert_array_equal(np.isnan(data_back), [True, False]) - # Expired deprecation error for nan2zero as argument to `to_fileobj` - with pytest.raises(ExpiredDeprecationError): - aw.to_fileobj(BytesIO(), 'F', True) - with pytest.raises(ExpiredDeprecationError): - aw.to_fileobj(BytesIO(), 'F', nan2zero=True) - # Error if nan2zero is not the value set at initialization - with pytest.raises(WriterError): - aw.to_fileobj(BytesIO(), 'F', False) # set explicitly aw = awt(arr, np.float32, nan2zero=True, **kwargs) data_back = round_trip(aw) @@ -527,14 +518,6 @@ def test_nan2zero(): data_back = round_trip(aw) astype_res = np.array(np.nan).astype(np.int32) assert_array_equal(data_back, [astype_res, 99]) - # Expired deprecation error for nan2zero as argument to `to_fileobj` - with pytest.raises(ExpiredDeprecationError): - aw.to_fileobj(BytesIO(), 'F', False) - with pytest.raises(ExpiredDeprecationError): - aw.to_fileobj(BytesIO(), 'F', nan2zero=False) - # Error if nan2zero is not the value set at initialization - with pytest.raises(WriterError): - aw.to_fileobj(BytesIO(), 'F', True) def test_byte_orders(): From 9463ca25d0de53570f48c64ff7f96cc47d263dd2 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:52:37 -0400 Subject: [PATCH 091/702] MNT: Various removals not listed in removal schedule --- nibabel/dataobj_images.py | 7 -- nibabel/freesurfer/mghformat.py | 40 --------- nibabel/freesurfer/tests/test_mghformat.py | 15 ---- nibabel/gifti/__init__.py | 1 - nibabel/gifti/giftiio.py | 85 -------------------- nibabel/gifti/parse_gifti_fast.py | 21 ----- nibabel/gifti/tests/test_giftiio.py | 25 ------ nibabel/gifti/tests/test_parse_gifti_fast.py | 14 +--- nibabel/parrec.py | 24 ------ nibabel/tests/test_image_api.py | 10 --- nibabel/tests/test_parrec.py | 7 -- 11 files changed, 1 insertion(+), 248 deletions(-) delete mode 100644 nibabel/gifti/giftiio.py delete mode 100644 nibabel/gifti/tests/test_giftiio.py diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 7480a5cbfc..f1c6b663c0 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -48,13 +48,6 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): def dataobj(self): return self._dataobj - @property - @deprecate_with_version('_data attribute not part of public API. ' - 'please use "dataobj" property instead.', - '2.0', '4.0') - def _data(self): - return self._dataobj - @deprecate_with_version('get_data() is deprecated in favor of get_fdata(),' ' which has a more predictable return type. 
To ' 'obtain get_data() behavior going forward, use ' diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index d23c8e571f..9d2cdb905b 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -24,7 +24,6 @@ from ..openers import ImageOpener from ..batteryrunners import BatteryRunner, Report from ..wrapstruct import LabeledWrapStruct -from ..deprecated import deprecate_with_version # mgh header # See https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat @@ -462,45 +461,6 @@ def diagnose_binaryblock(klass, binaryblock, endianness=None): return '\n'.join([report.message for report in reports if report.message]) - class _HeaderData: - """ Provide interface to deprecated MGHHeader fields""" - def __init__(self, structarr): - self._structarr = structarr - - def __getitem__(self, item): - sa = self._structarr - if item == 'mrparams': - return np.hstack((sa['tr'], sa['flip_angle'], sa['te'], sa['ti'])) - return sa[item] - - def __setitem__(self, item, val): - sa = self._structarr - if item == 'mrparams': - sa['tr'], sa['flip_angle'], sa['te'], sa['ti'] = val - else: - sa[item] = val - - @property - @deprecate_with_version('_header_data is deprecated.\n' - 'Please use the _structarr interface instead.\n' - 'Note also that some fields have changed name and ' - 'shape.', - '2.3', '4.0') - def _header_data(self): - """ Deprecated field-access interface """ - return self._HeaderData(self._structarr) - - def __getitem__(self, item): - if item == 'mrparams': - return self._header_data[item] - return super(MGHHeader, self).__getitem__(item) - - def __setitem__(self, item, value): - if item == 'mrparams': - self._header_data[item] = value - else: - super(MGHHeader, self).__setitem__(item, value) - class MGHImage(SpatialImage, SerializableImage): """ Class for MGH format image diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 9c75d06208..4c812087c2 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -22,7 +22,6 @@ from ...volumeutils import sys_is_le from ...wrapstruct import WrapStructError from ... 
import imageglobals -from ...deprecator import ExpiredDeprecationError import pytest @@ -340,20 +339,6 @@ def test_mghheader_default_structarr(): MGHHeader.default_structarr(endianness=endianness) -def test_deprecated_fields(): - hdr = MGHHeader() - hdr_data = MGHHeader._HeaderData(hdr.structarr) - - # mrparams is the only deprecated field at the moment - # Accessing hdr_data is equivalent to accessing hdr, so double all checks, - # but expect success on hdr_data['mrparams'] - with pytest.raises(ExpiredDeprecationError): - hdr['mrparams'] - with pytest.raises(ExpiredDeprecationError): - hdr['mrparams'] = [1, 2, 3, 4] - assert_array_equal(hdr_data['mrparams'], 0) - - class TestMGHImage(tsi.TestSpatialImage, tsi.MmapImageMixin): """ Apply general image tests to MGHImage """ diff --git a/nibabel/gifti/__init__.py b/nibabel/gifti/__init__.py index c9bf85b3a0..54bfbd0ffa 100644 --- a/nibabel/gifti/__init__.py +++ b/nibabel/gifti/__init__.py @@ -17,6 +17,5 @@ gifti """ -from .giftiio import read, write from .gifti import (GiftiMetaData, GiftiNVPairs, GiftiLabelTable, GiftiLabel, GiftiCoordSystem, GiftiDataArray, GiftiImage) diff --git a/nibabel/gifti/giftiio.py b/nibabel/gifti/giftiio.py deleted file mode 100644 index 46219e8c1d..0000000000 --- a/nibabel/gifti/giftiio.py +++ /dev/null @@ -1,85 +0,0 @@ -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# General Gifti Input - Output to and from the filesystem -# Stephan Gerhard, Oktober 2010 -############## - -from ..deprecated import deprecate_with_version - - -@deprecate_with_version('giftiio.read function deprecated. ' - "Use nibabel.load() instead.", - '2.1', '4.0') -def read(filename): - """ Load a Gifti image from a file - - Parameters - ---------- - filename : string - The Gifti file to open, it has usually ending .gii - - Returns - ------- - img : GiftiImage - Returns a GiftiImage - """ - from ..loadsave import load - return load(filename) - - -@deprecate_with_version('giftiio.write function deprecated. ' - "Use nibabel.save() instead.", - '2.1', '4.0') -def write(image, filename): - """ Save the current image to a new file - - Parameters - ---------- - image : GiftiImage - A GiftiImage instance to store - filename : string - Filename to store the Gifti file to - - Returns - ------- - None - - Notes - ----- - We write all files with utf-8 encoding, and specify this at the top of the - XML file with the ``encoding`` attribute. - - The Gifti spec suggests using the following suffixes to your - filename when saving each specific type of data: - - .gii - Generic GIFTI File - .coord.gii - Coordinates - .func.gii - Functional - .label.gii - Labels - .rgba.gii - RGB or RGBA - .shape.gii - Shape - .surf.gii - Surface - .tensor.gii - Tensors - .time.gii - Time Series - .topo.gii - Topology - - The Gifti file is stored in endian convention of the current machine. 
- """ - from ..loadsave import save - return save(image, filename) diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 17ae695e55..ed55fd97ea 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -24,7 +24,6 @@ gifti_endian_codes) from ..nifti1 import data_type_codes, xform_codes, intent_codes from ..xmlutils import XmlParser -from ..deprecated import deprecate_with_version class GiftiParseError(ExpatError): @@ -409,23 +408,3 @@ def flush_chardata(self): def pending_data(self): """True if there is character data pending for processing""" return self._char_blocks is not None - - -class Outputter(GiftiImageParser): - - @deprecate_with_version('Outputter class deprecated. ' - "Use GiftiImageParser instead.", - '2.1', '4.0') - def __init__(self): - super(Outputter, self).__init__() - - def initialize(self): - """ Initialize outputter""" - self.__init__() - - -@deprecate_with_version('parse_gifti_file deprecated. ' - "Use GiftiImageParser.parse() instead.", - '2.1', '4.0') -def parse_gifti_file(fname=None, fptr=None, buffer_size=None): - GiftiImageParser(buffer_size=buffer_size).parse(fname=fname, fptr=fptr) diff --git a/nibabel/gifti/tests/test_giftiio.py b/nibabel/gifti/tests/test_giftiio.py deleted file mode 100644 index f2e2458120..0000000000 --- a/nibabel/gifti/tests/test_giftiio.py +++ /dev/null @@ -1,25 +0,0 @@ -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## - -from ..gifti import GiftiImage -from ..giftiio import read, write -from .test_parse_gifti_fast import DATA_FILE1 -from ...deprecator import ExpiredDeprecationError - -import pytest - - -def test_read_deprecated(tmp_path): - with pytest.raises(ExpiredDeprecationError): - read(DATA_FILE1) - - img = GiftiImage.from_filename(DATA_FILE1) - fname = tmp_path / 'test.gii' - with pytest.raises(ExpiredDeprecationError): - write(img, fname) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index a02761027a..d376611581 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -17,12 +17,10 @@ from .. 
import gifti as gi from ..util import gifti_endian_codes -from ..parse_gifti_fast import (Outputter, parse_gifti_file, GiftiParseError, - GiftiImageParser) +from ..parse_gifti_fast import GiftiParseError, GiftiImageParser from ...loadsave import load, save from ...nifti1 import xform_codes from ...tmpdirs import InTemporaryDirectory -from ...deprecator import ExpiredDeprecationError from numpy.testing import assert_array_almost_equal @@ -354,16 +352,6 @@ def test_parse_dataarrays(): assert img.numDA == 0 -def test_parse_deprecated(): - - # Test deprecation - with pytest.raises(ExpiredDeprecationError): - Outputter() - - with pytest.raises(ExpiredDeprecationError): - parse_gifti_file() - - def test_parse_with_buffersize(): for buff_sz in [None, 1, 2**12]: img2 = load(DATA_FILE2, buffer_size=buff_sz) diff --git a/nibabel/parrec.py b/nibabel/parrec.py index c2d7160806..304c0c2cc0 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -137,7 +137,6 @@ from .nifti1 import unit_codes from .fileslice import fileslice, strided_scalar from .openers import ImageOpener -from .deprecated import deprecate_with_version # PSL to RAS affine PSL_TO_RAS = np.array([[0, 0, -1, 0], # L -> R @@ -871,29 +870,6 @@ def _get_unique_image_prop(self, name): f'({props}). This is not supported.') return props[0] - @deprecate_with_version('get_voxel_size deprecated. ' - 'Please use "get_zooms" instead.', - '2.0', '4.0') - def get_voxel_size(self): - """Returns the spatial extent of a voxel. - - Does not include the slice gap in the slice extent. - - If you need the slice thickness not including the slice gap, use - ``self.image_defs['slice thickness']``. - - Returns - ------- - vox_size: shape (3,) ndarray - """ - # slice orientation for the whole image series - slice_thickness = self._get_unique_image_prop('slice thickness') - voxsize_inplane = self._get_unique_image_prop('pixel spacing') - voxsize = np.array((voxsize_inplane[0], - voxsize_inplane[1], - slice_thickness)) - return voxsize - def get_data_offset(self): """ PAR header always has 0 data offset (into REC file) """ return 0 diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 680458659d..16003fd79c 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -412,16 +412,6 @@ def _check_array_caching(self, imaker, meth_name, caching): data = get_data_func(dtype=float_type) assert (data is img.dataobj) == (arr_dtype == float_type) - def validate_data_deprecated(self, imaker, params): - # Check _data property still exists, but raises warning - img = imaker() - with pytest.raises(ExpiredDeprecationError): - assert_data_similar(img._data, params) - # Check setting _data raises error - fake_data = np.zeros(img.shape).astype(img.get_data_dtype()) - with pytest.raises(AttributeError): - img._data = fake_data - def validate_shape(self, imaker, params): # Validate shape img = imaker() diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index f40bf3b80a..22e805cb8f 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -23,7 +23,6 @@ import pytest from ..testing import (clear_and_catch_warnings, suppress_warnings, assert_arr_dict_equal) -from ..deprecator import ExpiredDeprecationError from .test_arrayproxy import check_mmap from . 
import test_spatialimages as tsi @@ -262,12 +261,6 @@ def test_affine_regression(): assert_almost_equal(hdr.get_affine(), exp_affine) -def test_get_voxel_size_deprecated(): - hdr = PARRECHeader(HDR_INFO, HDR_DEFS) - with pytest.raises(ExpiredDeprecationError): - hdr.get_voxel_size() - - def test_get_sorted_slice_indices(): # Test sorted slice indices hdr = PARRECHeader(HDR_INFO, HDR_DEFS) From 4d4fc99ddde6dce1d54ad21ebb7e76fcf7392f64 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 2 Oct 2022 20:53:06 -0400 Subject: [PATCH 092/702] MNT: Remove nibabel.py3k --- nibabel/conftest.py | 4 ---- nibabel/py3k.py | 9 --------- nibabel/tests/test_removalschedule.py | 2 +- 3 files changed, 1 insertion(+), 14 deletions(-) delete mode 100644 nibabel/py3k.py diff --git a/nibabel/conftest.py b/nibabel/conftest.py index 697eef4ad6..1f9ecd09cf 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -1,9 +1,5 @@ import pytest -# Pre-load deprecated modules to avoid cluttering warnings -with pytest.warns(FutureWarning): - import nibabel.py3k - # Ignore warning requesting help with nicom with pytest.warns(UserWarning): import nibabel.nicom diff --git a/nibabel/py3k.py b/nibabel/py3k.py deleted file mode 100644 index 02dd1f16e7..0000000000 --- a/nibabel/py3k.py +++ /dev/null @@ -1,9 +0,0 @@ -import warnings - -warnings.warn("We no longer carry a copy of the 'py3k' module in nibabel; " - "Please import from the 'numpy.compat.py3k' module directly. " - "Full removal scheduled for nibabel 4.0.", - FutureWarning, - stacklevel=2) - -from numpy.compat.py3k import * # noqa diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index abafe4a1a7..c54a069e55 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -4,7 +4,7 @@ import pytest MODULE_SCHEDULE = [ - ("5.0.0", ["nibabel.keywordonly"]), + ("5.0.0", ["nibabel.keywordonly", "nibabel.py3k"]), ("4.0.0", ["nibabel.trackvis"]), ("3.0.0", ["nibabel.minc", "nibabel.checkwarns"]), # Verify that the test will be quiet if the schedule outlives the modules From b84e0e19c8102de6e43129bc91af7e5bd201dd5b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 18 Dec 2022 11:16:23 -0500 Subject: [PATCH 093/702] RF: Write DFT database manager as object This adds a dft._DB class that handles the _init_db and _db_(no)change functions. The default instance remains at dft.DB, but this allows us to create new instances for testing purposes. --- nibabel/dft.py | 114 +++++++++++++++++++++++++++---------------------- 1 file changed, 62 insertions(+), 52 deletions(-) diff --git a/nibabel/dft.py b/nibabel/dft.py index f47d70ccb6..51b6424a84 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -11,6 +11,7 @@ """ +import contextlib import os from os.path import join as pjoin import tempfile @@ -74,7 +75,7 @@ def __getattribute__(self, name): val = object.__getattribute__(self, name) if name == 'series' and val is None: val = [] - with _db_nochange() as c: + with DB.readonly_cursor() as c: c.execute("SELECT * FROM series WHERE study = ?", (self.uid, )) cols = [el[0] for el in c.description] for row in c: @@ -106,7 +107,7 @@ def __getattribute__(self, name): val = object.__getattribute__(self, name) if name == 'storage_instances' and val is None: val = [] - with _db_nochange() as c: + with DB.readonly_cursor() as c: query = """SELECT * FROM storage_instance WHERE series = ? 
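The `_DB` manager this patch introduces (the class added below) can also be constructed standalone, which is what enables the per-test isolation added in the following patches. A minimal sketch of such standalone use, assuming only the API added in this patch (the `fname`/`verbose` constructor arguments and the two cursor context managers); the query is illustrative:

    from nibabel import dft

    # A throwaway database; ':memory:' keeps it off the filesystem.
    db = dft._DB(fname=':memory:', verbose=False)

    # The sqlite connection is created lazily, on first access of `session`,
    # and the schema (directory, study, series, ... tables) is created with it.
    with db.readonly_cursor() as c:  # closes the cursor and rolls back on exit
        c.execute('SELECT COUNT(*) FROM directory')
        assert c.fetchone()[0] == 0

Splitting cursor access into read-only (always rolls back) and read-write (commits on success, rolls back on error) variants replaces the old `_db_nochange`/`_db_change` guards with the same semantics.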
@@ -227,7 +228,7 @@ def __init__(self, d): def __getattribute__(self, name): val = object.__getattribute__(self, name) if name == 'files' and val is None: - with _db_nochange() as c: + with DB.readonly_cursor() as c: query = """SELECT directory, name FROM file WHERE storage_instance = ? @@ -241,34 +242,6 @@ def dicom(self): return pydicom.read_file(self.files[0]) -class _db_nochange: - """context guard for read-only database access""" - - def __enter__(self): - self.c = DB.cursor() - return self.c - - def __exit__(self, type, value, traceback): - if type is None: - self.c.close() - DB.rollback() - - -class _db_change: - """context guard for database access requiring a commit""" - - def __enter__(self): - self.c = DB.cursor() - return self.c - - def __exit__(self, type, value, traceback): - if type is None: - self.c.close() - DB.commit() - else: - DB.rollback() - - def _get_subdirs(base_dir, files_dict=None, followlinks=False): dirs = [] for (dirpath, dirnames, filenames) in os.walk(base_dir, followlinks=followlinks): @@ -288,7 +261,7 @@ def update_cache(base_dir, followlinks=False): for d in dirs: os.stat(d) mtimes[d] = os.stat(d).st_mtime - with _db_nochange() as c: + with DB.readwrite_cursor() as c: c.execute("SELECT path, mtime FROM directory") db_mtimes = dict(c) c.execute("SELECT uid FROM study") @@ -297,7 +270,6 @@ def update_cache(base_dir, followlinks=False): series = [row[0] for row in c] c.execute("SELECT uid FROM storage_instance") storage_instances = [row[0] for row in c] - with _db_change() as c: for dir in sorted(mtimes.keys()): if dir in db_mtimes and mtimes[dir] <= db_mtimes[dir]: continue @@ -316,7 +288,7 @@ def get_studies(base_dir=None, followlinks=False): if base_dir is not None: update_cache(base_dir, followlinks) if base_dir is None: - with _db_nochange() as c: + with DB.readonly_cursor() as c: c.execute("SELECT * FROM study") studies = [] cols = [el[0] for el in c.description] @@ -331,7 +303,7 @@ def get_studies(base_dir=None, followlinks=False): WHERE uid IN (SELECT storage_instance FROM file WHERE directory = ?))""" - with _db_nochange() as c: + with DB.readonly_cursor() as c: study_uids = {} for dir in _get_subdirs(base_dir, followlinks=followlinks): c.execute(query, (dir, )) @@ -443,7 +415,7 @@ def _update_file(c, path, fname, studies, series, storage_instances): def clear_cache(): - with _db_change() as c: + with DB.readwrite_cursor() as c: c.execute("DELETE FROM file") c.execute("DELETE FROM directory") c.execute("DELETE FROM storage_instance") @@ -478,26 +450,64 @@ def clear_cache(): mtime INTEGER NOT NULL, storage_instance TEXT DEFAULT NULL REFERENCES storage_instance, PRIMARY KEY (directory, name))""") -DB_FNAME = pjoin(tempfile.gettempdir(), f'dft.{getpass.getuser()}.sqlite') -DB = None -def _init_db(verbose=True): - """ Initialize database """ - if verbose: - logger.info('db filename: ' + DB_FNAME) - global DB - DB = sqlite3.connect(DB_FNAME, check_same_thread=False) - with _db_change() as c: - c.execute("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'") - if c.fetchone()[0] == 0: - logger.debug('create') - for q in CREATE_QUERIES: - c.execute(q) +class _DB: + def __init__(self, fname=None, verbose=True): + self.fname = fname or pjoin(tempfile.gettempdir(), f'dft.{getpass.getuser()}.sqlite') + self.verbose = verbose + + @property + def session(self): + """Get sqlite3 Connection + + The connection is created on the first call of this property + """ + try: + return self._session + except AttributeError: + self._init_db() + return self._session + + 
def _init_db(self):
+        if self.verbose:
+            logger.info('db filename: ' + self.fname)
+
+        self._session = sqlite3.connect(self.fname, isolation_level="EXCLUSIVE")
+        with self.readwrite_cursor() as c:
+            c.execute("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'")
+            if c.fetchone()[0] == 0:
+                logger.debug('create')
+                for q in CREATE_QUERIES:
+                    c.execute(q)
+
+    def __repr__(self):
+        return f"<DFT {self.fname!r}>"
+
+    @contextlib.contextmanager
+    def readonly_cursor(self):
+        cursor = self.session.cursor()
+        try:
+            yield cursor
+        finally:
+            cursor.close()
+            self.session.rollback()
+
+    @contextlib.contextmanager
+    def readwrite_cursor(self):
+        cursor = self.session.cursor()
+        try:
+            yield cursor
+        except Exception:
+            self.session.rollback()
+            raise
+        finally:
+            cursor.close()
+            self.session.commit()
+
+
+DB = None
 if os.name == 'nt':
     warnings.warn('dft needs FUSE which is not available for windows')
 else:
-    _init_db()
-# eof
+    DB = _DB()

From 32e02d728654571c55e34965ada010de09fd1741 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Mon, 19 Dec 2022 20:14:50 -0500
Subject: [PATCH 094/702] TEST: Create fresh in-memory database for each dft
 unit test

---
 nibabel/tests/test_dft.py | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py
index b00c136312..30fafcd8db 100644
--- a/nibabel/tests/test_dft.py
+++ b/nibabel/tests/test_dft.py
@@ -29,12 +29,21 @@ def setUpModule():
         raise unittest.SkipTest('Need pydicom for dft tests, skipping')
 
 
+@pytest.fixture
+def db(monkeypatch):
+    """Build a dft database in memory to avoid cross-process races
+    and not modify the host filesystem."""
+    database = dft._DB(fname=":memory:")
+    monkeypatch.setattr(dft, "DB", database)
+    yield database
+
+
-def test_init():
+def test_init(db):
     dft.clear_cache()
     dft.update_cache(data_dir)
 
 
-def test_study():
+def test_study(db):
     studies = dft.get_studies(data_dir)
     assert len(studies) == 1
     assert (studies[0].uid ==
             '1.3.12.2.1107.5.2.32.35119.30000010011408520750000000022')
@@ -48,7 +57,7 @@ def test_study():
     assert studies[0].patient_sex == 'F'
 
 
-def test_series():
+def test_series(db):
     studies = dft.get_studies(data_dir)
     assert len(studies[0].series) == 1
     ser = studies[0].series[0]
@@ -62,7 +71,7 @@ def test_series():
     assert ser.bits_stored == 12
 
 
-def test_storage_instances():
+def test_storage_instances(db):
     studies = dft.get_studies(data_dir)
     sis = studies[0].series[0].storage_instances
     assert len(sis) == 2
@@ -74,19 +83,19 @@ def test_storage_instances():
             '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.1')
 
 
-def test_storage_instance():
+def test_storage_instance(db):
     pass
 
 
 @unittest.skipUnless(have_pil, 'could not import PIL.Image')
-def test_png():
+def test_png(db):
     studies = dft.get_studies(data_dir)
     data = studies[0].series[0].as_png()
     im = PImage.open(BytesIO(data))
     assert im.size == (256, 256)
 
 
-def test_nifti():
+def test_nifti(db):
     studies = dft.get_studies(data_dir)
     data = studies[0].series[0].as_nifti()
     assert len(data) == 352 + 2 * 256 * 256 * 2

From 32bc89acaa4f5887036aa71f7c7c28092d0d224a Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 20 Dec 2022 08:33:53 -0500
Subject: [PATCH 095/702] TEST: Test _DB class, increase coverage a bit

---
 nibabel/tests/test_dft.py | 49 +++++++++++++++++++++++++++------------
 1 file changed, 34 insertions(+), 15 deletions(-)

diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py
index 30fafcd8db..61e031b8d3 100644
--- a/nibabel/tests/test_dft.py
+++ b/nibabel/tests/test_dft.py
@@ -5,6 +5,7 @@
 from os.path import join as pjoin, dirname
 from io
import BytesIO
 from ..testing import suppress_warnings
+import sqlite3
 
 with suppress_warnings():
     from .. import dft
@@ -29,6 +30,24 @@ def setUpModule():
         raise unittest.SkipTest('Need pydicom for dft tests, skipping')
 
 
+class Test_DBclass:
+    """Some tests on the database manager class that don't get exercised through the API"""
+    def setup_method(self):
+        self._db = dft._DB(fname=":memory:", verbose=False)
+
+    def test_repr(self):
+        assert repr(self._db) == "<DB: :memory:>"
+
+    def test_cursor_conflict(self):
+        rwc = self._db.readwrite_cursor
+        statement = ("INSERT INTO directory (path, mtime) VALUES (?, ?)", ("/tmp", 0))
+        with pytest.raises(sqlite3.IntegrityError):
+            # Whichever exits first will commit and make the second violate uniqueness
+            with rwc() as c1, rwc() as c2:
+                c1.execute(*statement)
+                c2.execute(*statement)
+
+
 @pytest.fixture
 def db(monkeypatch):
     """Build a dft database in memory to avoid cross-process races
@@ -41,20 +60,24 @@ def db(monkeypatch):
 def test_init(db):
     dft.clear_cache()
     dft.update_cache(data_dir)
+    # Verify a second update doesn't crash
+    dft.update_cache(data_dir)
 
 
 def test_study(db):
-    studies = dft.get_studies(data_dir)
-    assert len(studies) == 1
-    assert (studies[0].uid ==
-            '1.3.12.2.1107.5.2.32.35119.30000010011408520750000000022')
-    assert studies[0].date == '20100114'
-    assert studies[0].time == '121314.000000'
-    assert studies[0].comments == 'dft study comments'
-    assert studies[0].patient_name == 'dft patient name'
-    assert studies[0].patient_id == '1234'
-    assert studies[0].patient_birth_date == '19800102'
-    assert studies[0].patient_sex == 'F'
+    # First pass updates the cache, second pass reads it out
+    for base_dir in (data_dir, None):
+        studies = dft.get_studies(base_dir)
+        assert len(studies) == 1
+        assert (studies[0].uid ==
+                '1.3.12.2.1107.5.2.32.35119.30000010011408520750000000022')
+        assert studies[0].date == '20100114'
+        assert studies[0].time == '121314.000000'
+        assert studies[0].comments == 'dft study comments'
+        assert studies[0].patient_name == 'dft patient name'
+        assert studies[0].patient_id == '1234'
+        assert studies[0].patient_birth_date == '19800102'
+        assert studies[0].patient_sex == 'F'
 
 
 def test_series(db):
@@ -83,10 +106,6 @@ def test_storage_instances(db):
             '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.1')
 
 
-def test_storage_instance(db):
-    pass
-
-
 @unittest.skipUnless(have_pil, 'could not import PIL.Image')
 def test_png(db):
     studies = dft.get_studies(data_dir)

From 7e96a944a3c34fc2364ab0de7ba5fe1cd50a7444 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Sat, 24 Dec 2022 08:56:54 -0500
Subject: [PATCH 096/702] TEST: Add expires deprecator to xfail on
 ExpiredDeprecationError

---
 nibabel/testing/__init__.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py
index b54c138bf0..8c9411ec91 100644
--- a/nibabel/testing/__init__.py
+++ b/nibabel/testing/__init__.py
@@ -16,6 +16,7 @@
 
 import unittest
 
+import pytest
 import numpy as np
 from numpy.testing import assert_array_equal
 
@@ -223,3 +224,15 @@ def setUp(self):
         if self.__class__.__name__.startswith('_'):
             raise unittest.SkipTest("Base test case - subclass to run")
         super().setUp()
+
+
+def expires(version):
+    "Decorator to mark a test as xfail with ExpiredDeprecationError after version"
+    from packaging.version import Version
+    from nibabel import __version__ as nbver
+    from nibabel.deprecator import ExpiredDeprecationError
+
+    if Version(nbver) < Version(version):
+        return lambda x: x
+
+    return 
pytest.mark.xfail(raises=ExpiredDeprecationError) From 9ed6c45ab63e4d7e6762aecbf569d8b01b2a1eef Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 24 Dec 2022 08:57:23 -0500 Subject: [PATCH 097/702] TEST: Pass pytest marks from validate_ methods to generated test_ methods --- nibabel/tests/test_api_validators.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index f1c592ce13..54c1c0fd95 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -20,6 +20,8 @@ def meth(self): validator(self, imaker, params) meth.__name__ = 'test_' + name[len('validate_'):] meth.__doc__ = f'autogenerated test from {klass.__name__}.{name}' + if hasattr(validator, 'pytestmark'): + meth.pytestmark = validator.pytestmark return meth for name in dir(klass): if not name.startswith('validate_'): From f2d063b994e6b7b2eae6edc8bf62c04ae7c40883 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 24 Dec 2022 09:10:22 -0500 Subject: [PATCH 098/702] TEST: Expire some deprecation tests --- nibabel/tests/test_analyze.py | 10 +--------- nibabel/tests/test_image_api.py | 4 ++-- nibabel/tests/test_image_load_save.py | 2 ++ nibabel/tests/test_loadsave.py | 3 +++ nibabel/tests/test_onetime.py | 2 ++ nibabel/tests/test_orientations.py | 2 ++ nibabel/tests/test_spatialimages.py | 4 +++- 7 files changed, 15 insertions(+), 12 deletions(-) diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index d91769bc73..7f32e2d8a7 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -716,7 +716,7 @@ def test_default_header(self): def test_data_hdr_cache(self): # test the API for loaded images, such that the data returned - # from np.asanyarray(img.dataobj) and img,get_fdata() are not + # from np.asanyarray(img.dataobj) and img.get_fdata() are not # affected by subsequent changes to the header. 
IC = self.image_class # save an image to a file map @@ -740,14 +740,6 @@ def test_data_hdr_cache(self): assert hdr.get_data_dtype() == np.dtype(np.uint8) assert_array_equal(img2.get_fdata(), data) assert_array_equal(np.asanyarray(img2.dataobj), data) - # now check read_img_data function - here we do see the changed - # header - with pytest.deprecated_call(match="from version: 3.2"): - sc_data = read_img_data(img2) - assert sc_data.shape == (3, 2, 2) - with pytest.deprecated_call(match="from version: 3.2"): - us_data = read_img_data(img2, prefer='unscaled') - assert us_data.shape == (3, 2, 2) def test_affine_44(self): IC = self.image_class diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 16003fd79c..c84ad3436f 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -48,7 +48,7 @@ from numpy.testing import assert_almost_equal, assert_array_equal, assert_warns, assert_allclose from nibabel.testing import (bytesio_round_trip, bytesio_filemap, assert_data_similar, - clear_and_catch_warnings, nullcontext) + clear_and_catch_warnings, nullcontext, expires) from ..tmpdirs import InTemporaryDirectory from .test_api_validators import ValidateAPI @@ -170,8 +170,8 @@ def validate_no_slicing(self, imaker, params): with pytest.raises(TypeError): img[:] + @expires("5.0.0") def validate_get_data_deprecated(self, imaker, params): - # Check deprecated header API img = imaker() with pytest.deprecated_call(): data = img.get_data() diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 12a49ecd7d..c23d145a36 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -29,6 +29,7 @@ from ..volumeutils import native_code, swapped_code from ..optpkg import optional_package from ..spatialimages import SpatialImage +from ..testing import expires from numpy.testing import assert_array_equal, assert_array_almost_equal import pytest @@ -270,6 +271,7 @@ def test_filename_save(): shutil.rmtree(pth) +@expires('5.0.0') def test_guessed_image_type(): # Test whether we can guess the image type from example files with pytest.deprecated_call(): diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index c58b95d8e8..799952b57d 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -14,6 +14,7 @@ from ..filebasedimages import ImageFileError from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory from ..openers import Opener +from ..testing import expires from ..optpkg import optional_package _, have_scipy, _ = optional_package('scipy') @@ -27,6 +28,7 @@ data_path = pjoin(dirname(__file__), 'data') +@expires("5.0.0") def test_read_img_data(): fnames_test = [ 'example4d.nii.gz', @@ -120,6 +122,7 @@ def test_signature_matches_extension(tmp_path): assert msg == "" +@expires("5.0.0") def test_read_img_data_nifti(): shape = (2, 3, 4) data = np.random.normal(size=shape) diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index 3f0c25a7d3..83700567b7 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -1,7 +1,9 @@ import pytest from nibabel.onetime import auto_attr, setattr_on_read +from nibabel.testing import expires +@expires('5.0.0') def test_setattr_on_read(): with pytest.deprecated_call(): class MagicProp: diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 77b892acbc..0b3b8081d0 100644 --- a/nibabel/tests/test_orientations.py +++ 
b/nibabel/tests/test_orientations.py @@ -20,6 +20,7 @@ ornt2axcodes, axcodes2ornt, aff2axcodes) from ..affines import from_matvec, to_matvec +from ..testing import expires IN_ARRS = [np.eye(4), @@ -353,6 +354,7 @@ def test_inv_ornt_aff(): inv_ornt_aff([[0, 1], [1, -1], [np.nan, np.nan]], (3, 4, 5)) +@expires('5.0.0') def test_flip_axis_deprecation(): a = np.arange(24).reshape((2, 3, 4)) axis = 1 diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index fc11452151..d8cf6d824d 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -26,7 +26,8 @@ bytesio_round_trip, clear_and_catch_warnings, suppress_warnings, - memmap_after_ufunc + memmap_after_ufunc, + expires, ) from ..tmpdirs import InTemporaryDirectory @@ -358,6 +359,7 @@ def test_get_fdata(self): assert rt_img.get_fdata() is not out_data assert (rt_img.get_fdata() == in_data).all() + @expires("5.0.0") def test_get_data(self): # Test array image and proxy image interface img_klass = self.image_class From 1b05c0fab94228b589301402ffc4cfa8b05e982d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 24 Dec 2022 09:11:53 -0500 Subject: [PATCH 099/702] TEST: Move slicing test from test_get_data to test_slicer --- nibabel/tests/test_spatialimages.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index d8cf6d824d..90ae7c5fc2 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -366,15 +366,6 @@ def test_get_data(self): in_data_template = np.arange(24, dtype=np.int16).reshape((2, 3, 4)) in_data = in_data_template.copy() img = img_klass(in_data, None) - # Can't slice into the image object: - with pytest.raises(TypeError) as exception_manager: - img[0, 0, 0] - # Make sure the right message gets raised: - assert (str(exception_manager.value) == - "Cannot slice image objects; consider using " - "`img.slicer[slice]` to generate a sliced image (see " - "documentation for caveats) or slicing image array data " - "with `img.dataobj[slice]` or `img.get_fdata()[slice]`") assert in_data is img.dataobj with pytest.deprecated_call(): out_data = img.get_data() @@ -413,6 +404,16 @@ def test_slicer(self): in_data = in_data_template.copy().reshape(dshape) img = img_klass(in_data, base_affine.copy()) + # Can't slice into the image object: + with pytest.raises(TypeError) as exception_manager: + img[0, 0, 0] + # Make sure the right message gets raised: + assert (str(exception_manager.value) == + "Cannot slice image objects; consider using " + "`img.slicer[slice]` to generate a sliced image (see " + "documentation for caveats) or slicing image array data " + "with `img.dataobj[slice]` or `img.get_fdata()[slice]`") + if not spatial_axes_first(img): with pytest.raises(ValueError): img.slicer From 8a066c0b5362cc3a8d5c91b61f9cabc2f82102bd Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 24 Dec 2022 09:32:42 -0500 Subject: [PATCH 100/702] DOC: Truncate docstrings of expired functions This makes the expiration more obvious than a single line at the end, as well as prevents doctests from raising ExpiredDeprecationErrors --- nibabel/deprecator.py | 14 ++++++++++++-- nibabel/tests/test_deprecator.py | 13 ++++++++----- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 81e93d868e..031a05e601 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -182,8 +182,18 
@@ def deprecated_func(*args, **kwargs): warnings.warn(message, warn_class, stacklevel=2) return func(*args, **kwargs) - deprecated_func.__doc__ = _add_dep_doc(deprecated_func.__doc__, - message, TESTSETUP, TESTCLEANUP) + keep_doc = deprecated_func.__doc__ + setup = TESTSETUP + cleanup = TESTCLEANUP + # After expiration, remove all but the first paragraph. + # The details are no longer relevant, but any code will likely + # raise exceptions we don't need. + if keep_doc and until and self.is_bad_version(until): + lines = '\n'.join(line.rstrip() for line in keep_doc.splitlines()) + keep_doc = lines.split('\n\n', 1)[0] + setup = '' + cleanup = '' + deprecated_func.__doc__ = _add_dep_doc(keep_doc, message, setup, cleanup) return deprecated_func return deprecator diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 2e7a0b9ba9..0280692299 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -111,11 +111,14 @@ def test_dep_func(self): 'foo\n\n* deprecated from version: 1.2\n* Raises ' f'{ExpiredDeprecationError} as of version: 1.8\n') func = dec('foo', '1.2', '1.8')(func_doc_long) - assert (func.__doc__ == - 'A docstring\n \n foo\n \n * deprecated from version: 1.2\n ' - f'* Raises {ExpiredDeprecationError} as of version: 1.8\n \n' - f'{indent(TESTSETUP, " ", lambda x: True)}' - f' Some text\n{indent(TESTCLEANUP, " ", lambda x: True)}') + assert func.__doc__ == f"""\ +A docstring + +foo + +* deprecated from version: 1.2 +* Raises {ExpiredDeprecationError} as of version: 1.8 +""" with pytest.raises(ExpiredDeprecationError): func() From 98e20ae248085db0fa7354bb2c648613150e2b76 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 24 Dec 2022 21:28:37 -0500 Subject: [PATCH 101/702] TEST: Remove some get_data tests --- nibabel/tests/test_image_api.py | 23 +---------------------- nibabel/tests/test_spatialimages.py | 4 ---- 2 files changed, 1 insertion(+), 26 deletions(-) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index c84ad3436f..b52ec74734 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -209,7 +209,7 @@ class DataInterfaceMixin(GetSetDtypeMixin): Use this mixin if your image has a ``dataobj`` property that contains an array or an array-like thing. """ - meth_names = ('get_fdata', 'get_data') + meth_names = ('get_fdata',) def validate_data_interface(self, imaker, params): # Check get data returns array, and caches @@ -304,27 +304,6 @@ def _check_proxy_interface(self, imaker, meth_name): with maybe_deprecated(meth_name): data_again = method() assert data is data_again - # Check the interaction of caching with get_data, get_fdata. - # Caching for `get_data` should have no effect on caching for - # get_fdata, and vice versa. - # Modify the cached data - data[:] = 43 - # Load using the other data fetch method - other_name = set(self.meth_names).difference({meth_name}).pop() - other_method = getattr(img, other_name) - with maybe_deprecated(other_name): - other_data = other_method() - # We get the original data, not the modified cache - assert_array_equal(proxy_data, other_data) - assert not np.all(data == other_data) - # We can modify the other cache, without affecting the first - other_data[:] = 44 - with maybe_deprecated(other_name): - assert_array_equal(other_method(), 44) - with pytest.deprecated_call(): - assert not np.all(method() == other_method()) - if meth_name != 'get_fdata': - return # Check that caching refreshes for new floating point type. 
img.uncache() fdata = img.get_fdata() diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 90ae7c5fc2..52eff4be72 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -522,13 +522,9 @@ def test_slicer(self): pass else: sliced_data = in_data[sliceobj] - with pytest.deprecated_call(): - assert (sliced_data == sliced_img.get_data()).all() assert (sliced_data == sliced_img.get_fdata()).all() assert (sliced_data == sliced_img.dataobj).all() assert (sliced_data == img.dataobj[sliceobj]).all() - with pytest.deprecated_call(): - assert (sliced_data == img.get_data()[sliceobj]).all() assert (sliced_data == img.get_fdata()[sliceobj]).all() From 37110cf33923c97391d23d93188ef4155f899254 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 24 Dec 2022 21:29:03 -0500 Subject: [PATCH 102/702] TEST: Suppress pytest warning, call TestCase.setup setup_method --- nibabel/tests/test_image_api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index b52ec74734..a12227a894 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -537,7 +537,7 @@ def validate_to_from_bytes(self, imaker, params): del img_b @pytest.fixture(autouse=True) - def setup(self, httpserver, tmp_path): + def setup_method(self, httpserver, tmp_path): """Make pytest fixtures available to validate functions""" self.httpserver = httpserver self.tmp_path = tmp_path @@ -767,7 +767,7 @@ class TestMinc1API(ImageHeaderAPI): class TestMinc2API(TestMinc1API): - def setup(self): + def setup_method(self): if not have_h5py: raise unittest.SkipTest('Need h5py for these tests') From bc45f47442e1894d3ce6cfb5c1037df8f6d48c50 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 24 Dec 2022 21:32:22 -0500 Subject: [PATCH 103/702] ENH: Add auto_attr test --- nibabel/tests/test_onetime.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index 83700567b7..c1609980a3 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -17,3 +17,17 @@ def a(self): assert 'a' in x.__dict__ # Each call to object() produces a unique object. Verify we get the same one every time. assert x.a is obj + + +def test_auto_attr(): + class MagicProp: + @auto_attr + def a(self): + return object() + + x = MagicProp() + assert 'a' not in x.__dict__ + obj = x.a + assert 'a' in x.__dict__ + # Each call to object() produces a unique object. Verify we get the same one every time. + assert x.a is obj From c37bcec25c8e6c320b8b119679bdec658d487f0a Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 16:13:46 +0200 Subject: [PATCH 104/702] Removed unused sys import and sphinx extension instructions. --- doc/source/conf.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 6155d0bc3b..ea5f94ea71 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -19,10 +19,9 @@ # All configuration values have a default; values that are commented out # serve to show the default. 
-import sys import os -from runpy import run_path from configparser import ConfigParser +from runpy import run_path # Check for external Sphinx extensions we depend on try: @@ -41,11 +40,6 @@ raise RuntimeError('Need nibabel on Python PATH; consider "make htmldoc" ' 'from nibabel root directory') -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.append(os.path.abspath('../sphinxext')) - # -- General configuration ---------------------------------------------------- # We load the nibabel release info into a dict by explicit execution From f9f69bdd802a8f96836514e52e64be0bd8d9bfdc Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 16:21:55 +0200 Subject: [PATCH 105/702] Added generated docs directory. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 1ba201f711..744dc5becd 100644 --- a/.gitignore +++ b/.gitignore @@ -83,6 +83,7 @@ Thumbs.db # Things specific to this project # ################################### doc/source/reference +doc/source/generated venv/ .buildbot.patch .vscode From f954989c4165522bb13f870aa8e0482a1283011f Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 16:22:55 +0200 Subject: [PATCH 106/702] Fixed toml formatting. --- pyproject.toml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 01d06cdfbc..b5b6c0e52d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,14 +5,10 @@ build-backend = "setuptools.build_meta:__legacy__" [project] name = "nibabel" description = "Access a multitude of neuroimaging data formats" -authors = [ - { name = "nibabel developers", email = "neuroimaging@python.org" }, -] -maintainers = [ - { name = "Christopher Markiewicz" }, -] +authors = [{ name = "NiBabel developers", email = "neuroimaging@python.org" }] +maintainers = [{ name = "Christopher Markiewicz" }] readme = "README.rst" -license = { text="MIT License" } +license = { text = "MIT License" } requires-python = ">=3.7" dependencies = ["numpy >=1.17", "packaging >=17", "setuptools"] classifiers = [ From e6477cb42f17d08478eafb695958ddb865186b72 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 16:29:25 +0200 Subject: [PATCH 107/702] Fixed conf.py metadata read error Added toml to docs requirements and replaced the ConfigParser metadata read. Sphinx also needed to be upgraded for a successful build. This resolves #1160. --- doc/source/conf.py | 13 ++++++++----- setup.cfg | 3 ++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index ea5f94ea71..cb8ad8292e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -20,9 +20,11 @@ # serve to show the default. import os -from configparser import ConfigParser +from pathlib import Path from runpy import run_path +import toml + # Check for external Sphinx extensions we depend on try: import numpydoc @@ -50,9 +52,8 @@ fobj.write(rel['long_description']) # Load metadata from setup.cfg -config = ConfigParser() -config.read(os.path.join('..', '..', 'setup.cfg')) -metadata = config['metadata'] +pyproject_dict = toml.load(Path("../../pyproject.toml")) +metadata = pyproject_dict["project"] # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
@@ -86,7 +87,9 @@ # General information about the project. project = u'NiBabel' -copyright = f"2006-2022, {metadata['maintainer']} <{metadata['author_email']}>" +author_name = metadata["authors"][0]["name"] +author_email = metadata["authors"][0]["email"] +copyright = f"2006-2022, {author_name} <{author_email}>" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/setup.cfg b/setup.cfg index ef1c140a29..336958c605 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,8 +10,9 @@ dev = doc = matplotlib >= 1.5.3 numpydoc - sphinx >=0.3,<3 + sphinx ~= 5.3 texext + toml minc2 = h5py spm = From 36d96f204768b49b1fe3faa12627ea6bb138292e Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 17:10:28 +0200 Subject: [PATCH 108/702] Fixed RST errors and warnings. --- doc/source/devel/biaps/biap_0003.rst | 8 +++++--- nibabel/ecat.py | 2 +- nibabel/gifti/__init__.py | 1 - nibabel/tmpdirs.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/doc/source/devel/biaps/biap_0003.rst b/doc/source/devel/biaps/biap_0003.rst index e409a5243f..ee3dd56ada 100644 --- a/doc/source/devel/biaps/biap_0003.rst +++ b/doc/source/devel/biaps/biap_0003.rst @@ -199,8 +199,10 @@ The base level header will usually also have image metadata fields giving information about the whole image. A field is an "image metadata field" if it is defined at the top level of the header. For example:: ->>> hdr = dict(nipy_header_version='1.0', -... Manufacturer="SIEMENS") +.. code-block:: python + + hdr = dict(nipy_header_version='1.0', + Manufacturer="SIEMENS") All image metadata fields are optional. @@ -635,7 +637,7 @@ Use case ^^^^^^^^ When doing motion correction on a 4D image, we calculate the required affine -transformation from |--| say |--| the second image to the first image; the +transformation from, say, the second image to the first image; the third image to the first image; etc. If there are N volumes in the 4D image, we would need to store N-1 affine transformations. If we have registered to the mean volume of the volume series instead of one of the volumes in the diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 54f600f147..3405a4210c 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -520,7 +520,7 @@ def __init__(self, hdr, mlist, fileobj): there is one subheader for each frame in the ecat file Parameters - ----------- + ---------- hdr : EcatHeader ECAT main header mlist : array shape (N, 4) diff --git a/nibabel/gifti/__init__.py b/nibabel/gifti/__init__.py index 54bfbd0ffa..5f3519e2c4 100644 --- a/nibabel/gifti/__init__.py +++ b/nibabel/gifti/__init__.py @@ -13,7 +13,6 @@ .. autosummary:: :toctree: ../generated - giftiio gifti """ diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 10b5ee78f5..f4874e7b4d 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -52,7 +52,7 @@ class InTemporaryDirectory(TemporaryDirectory): """ Create, return, and change directory to a temporary directory Notes - ------ + ----- As its name suggests, the class temporarily changes the working directory of the Python process, and this is not thread-safe. We suggest using it only for tests. 
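
[Illustrative sketch, not a patch in this series: it consolidates the
conf.py metadata thread running through patches 107, 110, and 118. The
relative pyproject.toml path and the single-author indexing are assumptions
carried over from those diffs; treat the shipped conf.py as authoritative.]

    # Read NiBabel's project metadata for the Sphinx config.
    # tomllib is standard library on Python 3.11+; older interpreters
    # fall back to the third-party tomli package, which has the same API.
    from pathlib import Path

    try:
        import tomllib
    except ImportError:
        import tomli as tomllib

    # TOML files must be opened in binary mode for tomllib/tomli.
    with open(Path("../../pyproject.toml"), "rb") as fobj:
        pyproject = tomllib.load(fobj)

    authors = pyproject["project"]["authors"][0]
    copyright = f"2006-2022, {authors['name']} <{authors['email']}>"
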
From fb31b2abe43e48a25439de2f1f1b4c948edff2ae Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 17:55:18 +0200 Subject: [PATCH 109/702] Update doc/source/devel/biaps/biap_0003.rst Co-authored-by: Chris Markiewicz --- doc/source/devel/biaps/biap_0003.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/source/devel/biaps/biap_0003.rst b/doc/source/devel/biaps/biap_0003.rst index ee3dd56ada..3b4bdad24e 100644 --- a/doc/source/devel/biaps/biap_0003.rst +++ b/doc/source/devel/biaps/biap_0003.rst @@ -199,10 +199,8 @@ The base level header will usually also have image metadata fields giving information about the whole image. A field is an "image metadata field" if it is defined at the top level of the header. For example:: -.. code-block:: python - - hdr = dict(nipy_header_version='1.0', - Manufacturer="SIEMENS") + >>> hdr = dict(nipy_header_version='1.0', + ... Manufacturer="SIEMENS") All image metadata fields are optional. From 4b693b84fd267f4a53a65070487c2d90d8f64ad6 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 18:04:20 +0200 Subject: [PATCH 110/702] Changed toml dependency to tomli. --- doc/source/conf.py | 8 ++++++-- setup.cfg | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index cb8ad8292e..f6f4ec971a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -23,7 +23,10 @@ from pathlib import Path from runpy import run_path -import toml +try: + import tomllib +except ImportError: + import tomli as tomllib # Check for external Sphinx extensions we depend on try: @@ -52,7 +55,8 @@ fobj.write(rel['long_description']) # Load metadata from setup.cfg -pyproject_dict = toml.load(Path("../../pyproject.toml")) +with open(Path("../../pyproject.toml"), 'rb') as f: + pyproject_dict = tomllib.load(f) metadata = pyproject_dict["project"] # Add any Sphinx extension module names here, as strings. They can be diff --git a/setup.cfg b/setup.cfg index 336958c605..08595dc324 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,7 +12,7 @@ doc = numpydoc sphinx ~= 5.3 texext - toml + tomli; python_version < "3.11" minc2 = h5py spm = From 5ea83668dd3a828daece243c8424382b837e3a8e Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 18:04:37 +0200 Subject: [PATCH 111/702] Fixed more minor RST warnings. --- doc/source/devel/biaps/biap_0004.rst | 1 + doc/source/devel/biaps/biap_0006.rst | 2 +- nibabel/processing.py | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/source/devel/biaps/biap_0004.rst b/doc/source/devel/biaps/biap_0004.rst index b88dd3b779..d8ac1569af 100644 --- a/doc/source/devel/biaps/biap_0004.rst +++ b/doc/source/devel/biaps/biap_0004.rst @@ -221,6 +221,7 @@ Improving access to varying meta data through the Nifti Currently, when accessing varying meta data through the `get_meta` method you can only get one value at a time:: + >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx)) for idx in xrange(data.shape[-1])] diff --git a/doc/source/devel/biaps/biap_0006.rst b/doc/source/devel/biaps/biap_0006.rst index 673318192f..ad4a0f9b8d 100644 --- a/doc/source/devel/biaps/biap_0006.rst +++ b/doc/source/devel/biaps/biap_0006.rst @@ -193,7 +193,7 @@ In NIfTI: We saw above that the MGH format refers to a volume (in our sense) as a *frame*. ECAT has the same usage - a frame is a 3D volume. The fmristat -software uses frame in the same sense |--| e.g. `line 32 of example.m +software uses frame in the same sense, e.g., `line 32 of example.m `_. 
Unfortunately DICOM appears to use "frame" to mean a 2D slice. For example, diff --git a/nibabel/processing.py b/nibabel/processing.py index b7abfb8c75..c80422ef2d 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -325,11 +325,13 @@ def conform(from_img, Using the default arguments, this function is meant to replicate most parts of FreeSurfer's ``mri_convert --conform`` command. Specifically, this function: + - Resamples data to ``output_shape`` - Resamples voxel sizes to ``voxel_size`` - Reorients to RAS (``mri_convert --conform`` reorients to LIA) Unlike ``mri_convert --conform``, this command does not: + - Transform data to range [0, 255] - Cast to unsigned eight-bit integer From 8369d48f489f6c92fc1cf95eb970ac28882f1f6d Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 28 Dec 2022 18:09:43 +0200 Subject: [PATCH 112/702] Removed whitespace to solve flake8's W293. --- nibabel/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/processing.py b/nibabel/processing.py index c80422ef2d..6b1c2c0a3b 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -325,13 +325,13 @@ def conform(from_img, Using the default arguments, this function is meant to replicate most parts of FreeSurfer's ``mri_convert --conform`` command. Specifically, this function: - + - Resamples data to ``output_shape`` - Resamples voxel sizes to ``voxel_size`` - Reorients to RAS (``mri_convert --conform`` reorients to LIA) Unlike ``mri_convert --conform``, this command does not: - + - Transform data to range [0, 255] - Cast to unsigned eight-bit integer From d5e0e39cc22cf3323247b865b43ccb7ef790afdf Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Thu, 29 Dec 2022 14:17:30 +0200 Subject: [PATCH 113/702] Added imports to resolve Sphinx namespace errors. --- nibabel/batteryrunners.py | 1 + nibabel/quaternions.py | 1 + nibabel/spatialimages.py | 1 + 3 files changed, 3 insertions(+) diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index a860ba3778..336d45387c 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -20,6 +20,7 @@ To run checks only, and return problem report objects: +>>> from nibabel.batteryrunners import BatteryRunner >>> def chk(obj, fix=False): # minimal check ... return obj, Report() >>> btrun = BatteryRunner((chk,)) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index 1b8e8b0454..51192ca741 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -19,6 +19,7 @@ they are applied on the left of the vector. For example: >>> import numpy as np +>>> from nibabel.quaternions import quat2mat >>> q = [0, 1, 0, 0] # 180 degree rotation around axis 0 >>> M = quat2mat(q) # from this module >>> vec = np.array([1, 2, 3]).reshape((3,1)) # column vector diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 09744d0149..4aa375155b 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -91,6 +91,7 @@ example, the Analyze data format needs an ``image`` and a ``header`` file type for storage: + >>> import numpy as np >>> import nibabel as nib >>> data = np.arange(24, dtype='f4').reshape((2,3,4)) >>> img = nib.AnalyzeImage(data, np.eye(4)) From 4f847274ced9216e04bde9873456a43e3ccb0d21 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Thu, 29 Dec 2022 14:18:08 +0200 Subject: [PATCH 114/702] Added pytest install in misc workflow to resolve import failures. 
--- .github/workflows/misc.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 5317b3d811..4f0d0ae733 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -53,7 +53,9 @@ jobs: source tools/ci/build_archive.sh echo "ARCHIVE=$ARCHIVE" >> $GITHUB_ENV - name: Install dependencies - run: tools/ci/install_dependencies.sh + run: | + tools/ci/install_dependencies.sh + pip install pytest - name: Install NiBabel run: tools/ci/install.sh - name: Run tests From d77a7b018ed31e57c676342950e4cc4f73a6e5e4 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Thu, 29 Dec 2022 14:27:50 +0200 Subject: [PATCH 115/702] Removed added pytest install, as it doesn't resolve the import failure. --- .github/workflows/misc.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 4f0d0ae733..5317b3d811 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -53,9 +53,7 @@ jobs: source tools/ci/build_archive.sh echo "ARCHIVE=$ARCHIVE" >> $GITHUB_ENV - name: Install dependencies - run: | - tools/ci/install_dependencies.sh - pip install pytest + run: tools/ci/install_dependencies.sh - name: Install NiBabel run: tools/ci/install.sh - name: Run tests From 68a10e4310772cfa7b76966bd0d33468751a41f3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 14:24:37 -0500 Subject: [PATCH 116/702] MNT: Move optional dependencies to pyproject.toml --- pyproject.toml | 28 ++++++++++++++++++++++++++-- setup.cfg | 40 ---------------------------------------- 2 files changed, 26 insertions(+), 42 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b5b6c0e52d..f1a91b8ba2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,8 +25,7 @@ classifiers = [ "Topic :: Scientific/Engineering", ] # Version from versioneer -# optional-dependencies from setup.cfg (using ConfigParser features) -dynamic = ["version", "optional-dependencies"] +dynamic = ["version"] [project.urls] "Homepage" = "https://nipy.org/nibabel" @@ -45,6 +44,31 @@ nib-trk2tck = "nibabel.cmdline.trk2tck:main" nib-roi = "nibabel.cmdline.roi:main" parrec2nii = "nibabel.cmdline.parrec2nii:main" +[project.optional-dependencies] +dicom = ["pydicom >=1.0.0"] +dicomfs = ["nibabel[dicom]", "pillow"] +dev = ["gitpython", "twine"] +doc = [ + "matplotlib >= 1.5.3", + "numpydoc", + "sphinx ~= 5.3", + "texext", + "tomli; python_version < \"3.11\"", +] +minc2 = ["h5py"] +spm = ["scipy"] +style = ["flake8"] +test = [ + "coverage", + "pytest !=5.3.4", + "pytest-cov", + "pytest-doctestplus", + "pytest-httpserver", + "pytest-xdist", +] +zstd = ["pyzstd >= 0.14.3"] +all = ["nibabel[dicomfs,dev,doc,minc2,spm,style,test,zstd]"] + [tool.setuptools] platforms = ["OS Independent"] provides = ["nibabel", "nisext"] diff --git a/setup.cfg b/setup.cfg index 08595dc324..607771ddc4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,43 +1,3 @@ -[options.extras_require] -dicom = - pydicom >=1.0.0 -dicomfs = - %(dicom)s - pillow -dev = - gitpython - twine -doc = - matplotlib >= 1.5.3 - numpydoc - sphinx ~= 5.3 - texext - tomli; python_version < "3.11" -minc2 = - h5py -spm = - scipy -style = - flake8 -test = - coverage - pytest !=5.3.4 - pytest-cov - pytest-doctestplus - pytest-httpserver - pytest-xdist -zstd = - pyzstd >= 0.14.3 -all = - %(dicomfs)s - %(dev)s - %(doc)s - %(minc2)s - %(spm)s - %(style)s - %(test)s - %(zstd)s - [flake8] max-line-length = 100 ignore = 
D100,D101,D102,D103,D104,D105,D200,D201,D202,D204,D205,D208,D209,D210,D300,D301,D400,D401,D403,E24,E121,E123,E126,E226,E266,E402,E704,E731,F821,I100,I101,I201,N802,N803,N804,N806,W503,W504,W605 From 5747e4a52b978d41225b25fe3b4eb0bc5a481e06 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 14:25:24 -0500 Subject: [PATCH 117/702] MNT: Sync doc-requirements with pyproject.toml --- doc-requirements.txt | 7 ++++--- tools/update_requirements.py | 6 ++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/doc-requirements.txt b/doc-requirements.txt index c934d76e6b..64830ca962 100644 --- a/doc-requirements.txt +++ b/doc-requirements.txt @@ -1,6 +1,7 @@ -# Requirements for building docs +# Auto-generated by tools/update_requirements.py -r requirements.txt -sphinx<3 +matplotlib >= 1.5.3 numpydoc +sphinx ~= 5.3 texext -matplotlib >=1.3.1 +tomli; python_version < "3.11" diff --git a/tools/update_requirements.py b/tools/update_requirements.py index b167438c6f..c624d9a8f8 100755 --- a/tools/update_requirements.py +++ b/tools/update_requirements.py @@ -11,10 +11,12 @@ pyproject_toml = repo_root / "pyproject.toml" reqs = repo_root / "requirements.txt" min_reqs = repo_root / "min-requirements.txt" +doc_reqs = repo_root / "doc-requirements.txt" with open(pyproject_toml, 'rb') as fobj: config = tomli.load(fobj) requirements = config["project"]["dependencies"] +doc_requirements = config["project"]["optional-dependencies"]["doc"] script_name = Path(__file__).relative_to(repo_root) @@ -27,3 +29,7 @@ # Write minimum requirements lines[1:-1] = [req.replace(">=", "==").replace("~=", "==") for req in requirements] min_reqs.write_text("\n".join(lines)) + +# Write documentation requirements +lines[1:-1] = ["-r requirements.txt"] + doc_requirements +doc_reqs.write_text("\n".join(lines)) From bbd731dbbea0ce2b26250edab541c2e8f7fce258 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 14:27:42 -0500 Subject: [PATCH 118/702] MNT: Fetch authors a bit more directly --- doc/source/conf.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index f6f4ec971a..775b416b38 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -55,9 +55,9 @@ fobj.write(rel['long_description']) # Load metadata from setup.cfg -with open(Path("../../pyproject.toml"), 'rb') as f: - pyproject_dict = tomllib.load(f) -metadata = pyproject_dict["project"] +with open(Path("../../pyproject.toml"), 'rb') as fobj: + pyproject = tomllib.load(fobj) +authors = pyproject["project"]["authors"][0] # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. @@ -91,9 +91,7 @@ # General information about the project. 
project = u'NiBabel' -author_name = metadata["authors"][0]["name"] -author_email = metadata["authors"][0]["email"] -copyright = f"2006-2022, {author_name} <{author_email}>" +copyright = f"2006-2022, {authors['name']} <{authors['email']}>" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the From bcd8cf92665794937ee4b7940e533814f12ab75a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 14:28:14 -0500 Subject: [PATCH 119/702] DOC: Enable intersphinx --- doc/source/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 775b416b38..964f4d02bc 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -63,7 +63,7 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', - #'sphinx.ext.intersphinx', + 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.inheritance_diagram', @@ -271,7 +271,7 @@ # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = {'https://docs.python.org/3/': None} # Config of plot_directive plot_include_source = True From 8d5ab4ea9e85ce063333f36a0795c9515329178e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 14:36:08 -0500 Subject: [PATCH 120/702] DOCTEST: Improt batteryrunners.Report --- nibabel/batteryrunners.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index 336d45387c..fecff5c13b 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -20,7 +20,7 @@ To run checks only, and return problem report objects: ->>> from nibabel.batteryrunners import BatteryRunner +>>> from nibabel.batteryrunners import BatteryRunner, Report >>> def chk(obj, fix=False): # minimal check ... return obj, Report() >>> btrun = BatteryRunner((chk,)) From 7792ff656ea7f0214a8d54de5fcbd36e248ed252 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 14:54:44 -0500 Subject: [PATCH 121/702] DOC: Restore API docs by preventing autosummary from generating a second copy --- doc/source/conf.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 964f4d02bc..1e3d298fdc 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -73,9 +73,10 @@ 'matplotlib.sphinxext.plot_directive', ] -# the following doesn't work with sphinx < 1.0, but will make a separate -# sphinx-autogen run obsolete in the future -#autosummary_generate = True +# Autosummary always wants to use a `generated/` directory. +# We generate with `make api-stamp` +# This could change in the future +autosummary_generate = False # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] From 68d39a7f2bb5ff977086fbf24283131c1ed56478 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 14:59:57 -0500 Subject: [PATCH 122/702] CI: Add doctest extra that installs doc and test extras --- .github/workflows/misc.yml | 2 +- pyproject.toml | 1 + tools/ci/check.sh | 5 ++--- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 5317b3d811..1890488008 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -24,7 +24,7 @@ jobs: matrix: python-version: ["3.10"] install: ['pip'] - check: ['style', 'doc'] + check: ['style', 'doctest'] pip-flags: [''] depends: ['REQUIREMENTS'] env: diff --git a/pyproject.toml b/pyproject.toml index f1a91b8ba2..053e4c06cc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,6 +67,7 @@ test = [ "pytest-xdist", ] zstd = ["pyzstd >= 0.14.3"] +doctest = ["nibabel[doc,test]"] all = ["nibabel[dicomfs,dev,doc,minc2,spm,style,test,zstd]"] [tool.setuptools] diff --git a/tools/ci/check.sh b/tools/ci/check.sh index a96f0874a4..3cfc1e5530 100755 --- a/tools/ci/check.sh +++ b/tools/ci/check.sh @@ -16,9 +16,8 @@ export NIBABEL_DATA_DIR="$PWD/nibabel-data" if [ "${CHECK_TYPE}" == "style" ]; then # Run styles only on core nibabel code. flake8 nibabel -elif [ "${CHECK_TYPE}" == "doc" ]; then - cd doc - make html && make doctest +elif [ "${CHECK_TYPE}" == "doctest" ]; then + make -C doc html && make -C doc doctest elif [ "${CHECK_TYPE}" == "test" ]; then # Change into an innocuous directory and find tests from installation mkdir for_testing From 97203a087d132f9a49ad3810536d0f8e12bcf757 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 16:22:19 -0500 Subject: [PATCH 123/702] DOC: Add some orphaned developer discussions --- doc/source/devel/devdiscuss.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/devel/devdiscuss.rst b/doc/source/devel/devdiscuss.rst index d93fd54a99..c864928d60 100644 --- a/doc/source/devel/devdiscuss.rst +++ b/doc/source/devel/devdiscuss.rst @@ -22,3 +22,7 @@ progress. spm_use modified_images data_pkg_design + data_pkg_discuss + data_pkg_uses + scaling + bv_formats From 5a8165ac0760394b3827ff8745a9e6625e1e968a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 16:28:16 -0500 Subject: [PATCH 124/702] DOC: Mark docs as orphan --- doc/source/devel/image_design.rst | 2 ++ doc/source/installing_data.rst | 2 ++ doc/source/old/ioimplementation.rst | 2 ++ 3 files changed, 6 insertions(+) diff --git a/doc/source/devel/image_design.rst b/doc/source/devel/image_design.rst index 46e7b5b4db..4aa0b18a76 100644 --- a/doc/source/devel/image_design.rst +++ b/doc/source/devel/image_design.rst @@ -1,3 +1,5 @@ +:orphan: + ######################## The nibabel image object ######################## diff --git a/doc/source/installing_data.rst b/doc/source/installing_data.rst index daab142c7a..c1b335fd02 100644 --- a/doc/source/installing_data.rst +++ b/doc/source/installing_data.rst @@ -1,3 +1,5 @@ +:orphan: + .. _installing-data: Installing data packages diff --git a/doc/source/old/ioimplementation.rst b/doc/source/old/ioimplementation.rst index 1d198c27cb..fd7914f467 100644 --- a/doc/source/old/ioimplementation.rst +++ b/doc/source/old/ioimplementation.rst @@ -1,5 +1,7 @@ .. 
-*- mode: rst -*- +:orphan: + ################################################## Relationship between images and io implementations ################################################## From e86addb701c94597149e832e4a6b846ac6fa0679 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 17:06:01 -0500 Subject: [PATCH 125/702] DOC: Exclude _version from autodoc --- doc/tools/build_modref_templates.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py index 6ec6848579..007175a262 100755 --- a/doc/tools/build_modref_templates.py +++ b/doc/tools/build_modref_templates.py @@ -83,6 +83,7 @@ def abort(error): r'\.info.*$', r'\.pkg_info.*$', r'\.py3k.*$', + r'\._version.*$', ] docwriter.write_api_docs(outdir) docwriter.write_index(outdir, 'index', relative_to=outdir) From 478ebb08b2fb536b1861398aec548e5eec6a5dc2 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 22 Jul 2022 23:13:25 -0400 Subject: [PATCH 126/702] MNT: Add blue configuration --- pyproject.toml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index b5b6c0e52d..f6730e4152 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,3 +55,13 @@ find = {} [tool.setuptools.package-data] nibabel = ["tests/data/*", "*/tests/data/*", "benchmarks/pytest.benchmark.ini"] + +[tool.blue] +line_length = 99 +target-version = ['py37'] +extend-exclude = ''' +( + _version.py + | nibabel/externals/ +) +''' From 5104cae50c640cc3df1bc8dd7b3e3b9cb35e1593 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 22 Jul 2022 23:14:45 -0400 Subject: [PATCH 127/702] MNT: Add isort configuration --- pyproject.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index f6730e4152..47ee66b024 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,3 +65,8 @@ extend-exclude = ''' | nibabel/externals/ ) ''' + +[tool.isort] +profile = "black" +line_length = 99 +extend_skip = ["_version.py", "externals"] From cc9a0bf92bf4025a8bf0d300d9de8bfb2c245c05 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 22 Jul 2022 22:38:55 -0400 Subject: [PATCH 128/702] STY: Simplify flake8 rules; fix issue found --- nibabel/nifti1.py | 3 --- setup.cfg | 5 +++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 1bffac10ce..a951522c8d 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -524,9 +524,6 @@ def get_sizeondisk(self): def __repr__(self): return "Nifti1Extensions(%s)" % ', '.join(str(e) for e in self) - def __cmp__(self, other): - return cmp(list(self), list(other)) - def write_to(self, fileobj, byteswap): """ Write header extensions to fileobj diff --git a/setup.cfg b/setup.cfg index 336958c605..0374c54f98 100644 --- a/setup.cfg +++ b/setup.cfg @@ -40,12 +40,13 @@ all = [flake8] max-line-length = 100 -ignore = D100,D101,D102,D103,D104,D105,D200,D201,D202,D204,D205,D208,D209,D210,D300,D301,D400,D401,D403,E24,E121,E123,E126,E226,E266,E402,E704,E731,F821,I100,I101,I201,N802,N803,N804,N806,W503,W504,W605 +extend-ignore = E203,E266,E402,E731 exclude = *test* *sphinx* nibabel/externals/* - */__init__.py +per-file-ignores = + */__init__.py: F401 [versioneer] VCS = git From 29c733d558c25237dac2dd1adb975f040be1f669 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Fri, 22 Jul 2022 23:21:41 -0400 Subject: [PATCH 129/702] MNT: Add make rule for building an ignore file for git-blame --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2190f815fc..093e177c36 100644 --- a/Makefile +++ b/Makefile @@ -78,6 +78,9 @@ distclean: clean $(WWW_DIR): if [ ! -d $(WWW_DIR) ]; then mkdir -p $(WWW_DIR); fi +.git-blame-ignore-revs: + git log --grep "\[git-blame-ignore-rev\]" --pretty=format:"# %ad - %ae - %s%n%H" \ + > .git-blame-ignore-revs # # Tests @@ -288,4 +291,4 @@ rm-orig: # Remove .orig temporary diff files generated by git find . -name "*.orig" -print | grep -v "fsaverage" | xargs rm -.PHONY: orig-src pylint all build +.PHONY: orig-src pylint all build .git-blame-ignore-revs From 1a8dd302ff85b1136c81d492509b80e7748339f0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 21:30:29 -0500 Subject: [PATCH 130/702] STY: blue Run with a custom patch to avoid realigning inline comments: pipx run --spec git+https://github.com/effigies/blue.git@fix/hanging-comments blue nibabel [git-blame-ignore-rev] --- nibabel/__init__.py | 48 +- nibabel/affines.py | 24 +- nibabel/analyze.py | 189 +++-- nibabel/arrayproxy.py | 125 +-- nibabel/arraywriters.py | 174 ++--- nibabel/batteryrunners.py | 35 +- nibabel/benchmarks/bench_array_to_file.py | 4 +- .../benchmarks/bench_arrayproxy_slicing.py | 8 +- nibabel/benchmarks/bench_fileslice.py | 56 +- nibabel/benchmarks/bench_finite_range.py | 4 +- nibabel/benchmarks/bench_load_save.py | 4 +- nibabel/benchmarks/butils.py | 5 +- nibabel/brikhead.py | 75 +- nibabel/caret.py | 18 +- nibabel/casting.py | 119 +-- nibabel/cifti2/__init__.py | 33 +- nibabel/cifti2/cifti2.py | 364 +++++---- nibabel/cifti2/cifti2_axes.py | 326 ++++---- nibabel/cifti2/parse_cifti2.py | 210 +++--- nibabel/cifti2/tests/test_axes.py | 148 ++-- nibabel/cifti2/tests/test_cifti2.py | 103 ++- nibabel/cifti2/tests/test_cifti2io_axes.py | 115 ++- nibabel/cifti2/tests/test_cifti2io_header.py | 304 ++++---- nibabel/cifti2/tests/test_name.py | 41 +- nibabel/cifti2/tests/test_new_cifti2.py | 212 +++--- nibabel/cmdline/conform.py | 37 +- nibabel/cmdline/convert.py | 38 +- nibabel/cmdline/dicomfs.py | 71 +- nibabel/cmdline/diff.py | 162 ++-- nibabel/cmdline/ls.py | 128 ++-- nibabel/cmdline/nifti_dx.py | 13 +- nibabel/cmdline/parrec2nii.py | 280 ++++--- nibabel/cmdline/roi.py | 47 +- nibabel/cmdline/stats.py | 21 +- nibabel/cmdline/tck2trk.py | 19 +- nibabel/cmdline/tests/test_conform.py | 18 +- nibabel/cmdline/tests/test_convert.py | 43 +- nibabel/cmdline/tests/test_parrec2nii.py | 28 +- nibabel/cmdline/tests/test_roi.py | 90 +-- nibabel/cmdline/tests/test_stats.py | 8 +- nibabel/cmdline/tests/test_utils.py | 448 ++++++++--- nibabel/cmdline/trk2tck.py | 12 +- nibabel/cmdline/utils.py | 22 +- nibabel/data.py | 74 +- nibabel/dataobj_images.py | 43 +- nibabel/deprecated.py | 16 +- nibabel/deprecator.py | 40 +- nibabel/dft.py | 125 +-- nibabel/ecat.py | 117 ++- nibabel/environment.py | 2 +- nibabel/eulerangles.py | 41 +- nibabel/filebasedimages.py | 70 +- nibabel/fileholders.py | 24 +- nibabel/filename_parser.py | 43 +- nibabel/fileslice.py | 103 ++- nibabel/fileutils.py | 2 +- nibabel/freesurfer/__init__.py | 11 +- nibabel/freesurfer/io.py | 76 +- nibabel/freesurfer/mghformat.py | 224 +++--- nibabel/freesurfer/tests/test_io.py | 97 +-- nibabel/freesurfer/tests/test_mghformat.py | 50 +- nibabel/funcs.py | 31 +- nibabel/gifti/__init__.py | 11 +- nibabel/gifti/gifti.py | 211 +++--- 
nibabel/gifti/parse_gifti_fast.py | 126 ++-- nibabel/gifti/tests/test_1.py | 2 +- nibabel/gifti/tests/test_gifti.py | 55 +- nibabel/gifti/tests/test_parse_gifti_fast.py | 186 +++-- nibabel/gifti/util.py | 39 +- nibabel/imageclasses.py | 40 +- nibabel/imageglobals.py | 5 +- nibabel/imagestats.py | 4 +- nibabel/info.py | 4 +- nibabel/loadsave.py | 45 +- nibabel/minc1.py | 74 +- nibabel/minc2.py | 28 +- nibabel/mriutils.py | 12 +- nibabel/nicom/__init__.py | 13 +- nibabel/nicom/ascconv.py | 26 +- nibabel/nicom/csareader.py | 40 +- nibabel/nicom/dicomreaders.py | 41 +- nibabel/nicom/dicomwrappers.py | 174 +++-- nibabel/nicom/dwiparams.py | 17 +- nibabel/nicom/structreader.py | 10 +- nibabel/nicom/tests/__init__.py | 4 +- nibabel/nicom/tests/data_pkgs.py | 12 +- nibabel/nicom/tests/test_ascconv.py | 32 +- nibabel/nicom/tests/test_csareader.py | 11 +- nibabel/nicom/tests/test_dicomreaders.py | 16 +- nibabel/nicom/tests/test_dicomwrappers.py | 144 ++-- nibabel/nicom/tests/test_dwiparams.py | 11 +- nibabel/nicom/tests/test_structreader.py | 6 +- nibabel/nicom/tests/test_utils.py | 4 +- nibabel/nicom/utils.py | 4 +- nibabel/nifti1.py | 710 +++++++++--------- nibabel/nifti2.py | 39 +- nibabel/onetime.py | 6 +- nibabel/openers.py | 42 +- nibabel/optpkg.py | 10 +- nibabel/orientations.py | 38 +- nibabel/parrec.py | 358 +++++---- nibabel/pkg_info.py | 13 +- nibabel/processing.py | 99 ++- nibabel/pydicom_compat.py | 9 +- nibabel/quaternions.py | 68 +- nibabel/rstutils.py | 44 +- nibabel/spaces.py | 10 +- nibabel/spatialimages.py | 128 ++-- nibabel/spm2analyze.py | 29 +- nibabel/spm99analyze.py | 59 +- nibabel/streamlines/__init__.py | 26 +- nibabel/streamlines/array_sequence.py | 185 +++-- nibabel/streamlines/header.py | 31 +- nibabel/streamlines/tck.py | 117 +-- .../streamlines/tests/test_array_sequence.py | 123 ++- nibabel/streamlines/tests/test_streamlines.py | 192 +++-- nibabel/streamlines/tests/test_tck.py | 87 +-- nibabel/streamlines/tests/test_tractogram.py | 557 +++++++------- .../streamlines/tests/test_tractogram_file.py | 8 +- nibabel/streamlines/tests/test_trk.py | 224 +++--- nibabel/streamlines/tractogram.py | 171 +++-- nibabel/streamlines/tractogram_file.py | 24 +- nibabel/streamlines/trk.py | 261 ++++--- nibabel/streamlines/utils.py | 6 +- nibabel/testing/__init__.py | 40 +- nibabel/testing/helpers.py | 13 +- nibabel/testing/np_features.py | 5 +- nibabel/tests/data/check_parrec_reslice.py | 15 +- nibabel/tests/data/gen_standard.py | 23 +- nibabel/tests/data/make_moved_anat.py | 6 +- nibabel/tests/nibabel_data.py | 11 +- nibabel/tests/scriptrunner.py | 35 +- nibabel/tests/test_affines.py | 94 +-- nibabel/tests/test_analyze.py | 156 ++-- nibabel/tests/test_api_validators.py | 26 +- nibabel/tests/test_arrayproxy.py | 101 ++- nibabel/tests/test_arraywriters.py | 135 ++-- nibabel/tests/test_batteryrunners.py | 2 +- nibabel/tests/test_brikhead.py | 54 +- nibabel/tests/test_casting.py | 64 +- nibabel/tests/test_data.py | 20 +- nibabel/tests/test_dataobj_images.py | 6 +- nibabel/tests/test_deprecated.py | 14 +- nibabel/tests/test_deprecator.py | 65 +- nibabel/tests/test_dft.py | 23 +- nibabel/tests/test_diff.py | 19 +- nibabel/tests/test_ecat.py | 104 +-- nibabel/tests/test_ecat_data.py | 4 +- nibabel/tests/test_endiancodes.py | 4 +- nibabel/tests/test_environment.py | 4 +- nibabel/tests/test_euler.py | 51 +- nibabel/tests/test_filebasedimages.py | 17 +- nibabel/tests/test_filehandles.py | 2 +- nibabel/tests/test_fileholders.py | 2 +- nibabel/tests/test_filename_parser.py | 54 +- 
nibabel/tests/test_files_interface.py | 16 +- nibabel/tests/test_fileslice.py | 469 +++++++----- nibabel/tests/test_fileutils.py | 2 +- nibabel/tests/test_floating.py | 102 ++- nibabel/tests/test_funcs.py | 32 +- nibabel/tests/test_image_api.py | 126 ++-- nibabel/tests/test_image_load_save.py | 24 +- nibabel/tests/test_image_types.py | 93 ++- nibabel/tests/test_imageclasses.py | 2 +- nibabel/tests/test_imageglobals.py | 2 +- nibabel/tests/test_imagestats.py | 2 +- nibabel/tests/test_init.py | 39 +- nibabel/tests/test_loadsave.py | 78 +- nibabel/tests/test_minc1.py | 83 +- nibabel/tests/test_minc2.py | 83 +- nibabel/tests/test_minc2_data.py | 56 +- nibabel/tests/test_mriutils.py | 5 +- nibabel/tests/test_nibabel_data.py | 2 +- nibabel/tests/test_nifti1.py | 336 +++++---- nibabel/tests/test_nifti2.py | 33 +- nibabel/tests/test_onetime.py | 1 + nibabel/tests/test_openers.py | 179 +++-- nibabel/tests/test_optpkg.py | 9 +- nibabel/tests/test_orientations.py | 269 +++---- nibabel/tests/test_parrec.py | 374 +++++---- nibabel/tests/test_parrec_data.py | 2 +- nibabel/tests/test_pkg_info.py | 75 +- nibabel/tests/test_processing.py | 107 ++- nibabel/tests/test_proxy_api.py | 87 ++- nibabel/tests/test_quaternions.py | 18 +- nibabel/tests/test_recoder.py | 6 +- nibabel/tests/test_removalschedule.py | 167 ++-- nibabel/tests/test_round_trip.py | 47 +- nibabel/tests/test_rstutils.py | 65 +- nibabel/tests/test_scaling.py | 107 +-- nibabel/tests/test_scripts.py | 237 +++--- nibabel/tests/test_spaces.py | 74 +- nibabel/tests/test_spatialimages.py | 115 +-- nibabel/tests/test_spm2analyze.py | 32 +- nibabel/tests/test_spm99analyze.py | 166 ++-- nibabel/tests/test_testing.py | 69 +- nibabel/tests/test_tmpdirs.py | 2 +- nibabel/tests/test_tripwire.py | 5 +- nibabel/tests/test_viewers.py | 9 +- nibabel/tests/test_volumeutils.py | 362 +++++---- nibabel/tests/test_wrapstruct.py | 38 +- nibabel/tmpdirs.py | 10 +- nibabel/tripwire.py | 11 +- nibabel/viewers.py | 118 +-- nibabel/volumeutils.py | 195 ++--- nibabel/wrapstruct.py | 67 +- nibabel/xmlutils.py | 14 +- 207 files changed, 8734 insertions(+), 7556 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index f96e80f0eb..ad14fc52dc 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -11,6 +11,7 @@ from .pkg_info import __version__ from .info import long_description as __doc__ + __doc__ += """ Quickstart ========== @@ -42,6 +43,7 @@ from . import spm2analyze as spm2 from . import nifti1 as ni1 from . import ecat + # object imports from .fileholders import FileHolder, FileHolderError from .loadsave import load, save @@ -56,10 +58,14 @@ from .cifti2 import Cifti2Header, Cifti2Image from .gifti import GiftiImage from .freesurfer import MGHImage -from .funcs import (squeeze_image, concat_images, four_to_three, - as_closest_canonical) -from .orientations import (io_orientation, flip_axis, OrientationError, - apply_orientation, aff2axcodes) +from .funcs import squeeze_image, concat_images, four_to_three, as_closest_canonical +from .orientations import ( + io_orientation, + flip_axis, + OrientationError, + apply_orientation, + aff2axcodes, +) from .imageclasses import all_image_classes from . import mriutils from . 
import streamlines @@ -72,9 +78,15 @@ def get_info(): return _get_pkg_info(os.path.dirname(__file__)) -def test(label=None, verbose=1, extra_argv=None, - doctests=False, coverage=False, raise_warnings=None, - timer=False): +def test( + label=None, + verbose=1, + extra_argv=None, + doctests=False, + coverage=False, + raise_warnings=None, + timer=False, +): """ Run tests for nibabel using pytest @@ -107,29 +119,30 @@ def test(label=None, verbose=1, extra_argv=None, Returns the result of running the tests as a ``pytest.ExitCode`` enum """ import pytest + args = [] if label is not None: - raise NotImplementedError("Labels cannot be set at present") + raise NotImplementedError('Labels cannot be set at present') verbose = int(verbose) if verbose > 0: - args.append("-" + "v" * verbose) + args.append('-' + 'v' * verbose) elif verbose < 0: - args.append("-" + "q" * -verbose) + args.append('-' + 'q' * -verbose) if extra_argv: args.extend(extra_argv) if doctests: - args.append("--doctest-modules") + args.append('--doctest-modules') if coverage: - args.extend(["--cov", "nibabel"]) + args.extend(['--cov', 'nibabel']) if raise_warnings is not None: - raise NotImplementedError("Warning filters are not implemented") + raise NotImplementedError('Warning filters are not implemented') if timer: - raise NotImplementedError("Timing is not implemented") + raise NotImplementedError('Timing is not implemented') - args.extend(["--pyargs", "nibabel"]) + args.extend(['--pyargs', 'nibabel']) return pytest.main(args=args) @@ -157,9 +170,10 @@ def bench(label=None, verbose=1, extra_argv=None): Returns the result of running the tests as a ``pytest.ExitCode`` enum """ from pkg_resources import resource_filename - config = resource_filename("nibabel", "benchmarks/pytest.benchmark.ini") + + config = resource_filename('nibabel', 'benchmarks/pytest.benchmark.ini') args = [] if extra_argv is not None: args.extend(extra_argv) - args.extend(["-c", config]) + args.extend(['-c', config]) return test(label, verbose, extra_argv=args) diff --git a/nibabel/affines.py b/nibabel/affines.py index 9fd141a8b7..c8bc586aa7 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -1,6 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Utility routines for working with points and affine transforms +"""Utility routines for working with points and affine transforms """ import numpy as np @@ -8,14 +8,15 @@ class AffineError(ValueError): - """ Errors in calculating or using affines """ + """Errors in calculating or using affines""" + # Inherits from ValueError to keep compatibility with ValueError previously # raised in append_diag pass def apply_affine(aff, pts, inplace=False): - """ Apply affine matrix `aff` to points `pts` + """Apply affine matrix `aff` to points `pts` Returns result of application of `aff` to the *right* of `pts`. The coordinate dimension of `pts` should be the last. @@ -142,7 +143,7 @@ def to_matvec(transform): def from_matvec(matrix, vector=None): - """ Combine a matrix and vector into an homogeneous affine + """Combine a matrix and vector into an homogeneous affine Combine a rotation / scaling / shearing matrix and translation vector into a transform in homogeneous coordinates. @@ -185,14 +186,14 @@ def from_matvec(matrix, vector=None): nin, nout = matrix.shape t = np.zeros((nin + 1, nout + 1), matrix.dtype) t[0:nin, 0:nout] = matrix - t[nin, nout] = 1. 
+ t[nin, nout] = 1.0 if vector is not None: t[0:nin, nout] = vector return t def append_diag(aff, steps, starts=()): - """ Add diagonal elements `steps` and translations `starts` to affine + """Add diagonal elements `steps` and translations `starts` to affine Typical use is in expanding 4x4 affines to larger dimensions. Nipy is the main consumer because it uses NxM affines, whereas we generally only use @@ -236,8 +237,7 @@ def append_diag(aff, steps, starts=()): raise AffineError('Steps should have same length as starts') old_n_out, old_n_in = aff.shape[0] - 1, aff.shape[1] - 1 # make new affine - aff_plus = np.zeros((old_n_out + n_steps + 1, - old_n_in + n_steps + 1), dtype=aff.dtype) + aff_plus = np.zeros((old_n_out + n_steps + 1, old_n_in + n_steps + 1), dtype=aff.dtype) # Get stuff from old affine aff_plus[:old_n_out, :old_n_in] = aff[:old_n_out, :old_n_in] aff_plus[:old_n_out, -1] = aff[:old_n_out, -1] @@ -250,7 +250,7 @@ def append_diag(aff, steps, starts=()): def dot_reduce(*args): - r""" Apply numpy dot product function from right to left on arrays + r"""Apply numpy dot product function from right to left on arrays For passed arrays :math:`A, B, C, ... Z` returns :math:`A \dot B \dot C ... \dot Z` where "." is the numpy array dot product. @@ -270,7 +270,7 @@ def dot_reduce(*args): def voxel_sizes(affine): - r""" Return voxel size for each input axis given `affine` + r"""Return voxel size for each input axis given `affine` The `affine` is the mapping between array (voxel) coordinates and mm (world) coordinates. @@ -308,7 +308,7 @@ def voxel_sizes(affine): but in general has length (N-1) where input `affine` is shape (M, N). """ top_left = affine[:-1, :-1] - return np.sqrt(np.sum(top_left ** 2, axis=0)) + return np.sqrt(np.sum(top_left**2, axis=0)) def obliquity(affine): @@ -340,7 +340,7 @@ def obliquity(affine): def rescale_affine(affine, shape, zooms, new_shape=None): - """ Return a new affine matrix with updated voxel sizes (zooms) + """Return a new affine matrix with updated voxel sizes (zooms) This function preserves the rotations and shears of the original affine, as well as the RAS location of the central voxel of the diff --git a/nibabel/analyze.py b/nibabel/analyze.py index a1c1cf1d2f..648c75d68a 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to the basic Mayo Analyze format +"""Read / write access to the basic Mayo Analyze format =========================== The Analyze header format @@ -84,14 +84,18 @@ import numpy as np -from .volumeutils import (native_code, swapped_code, make_dt_codes, - shape_zoom_affine, array_from_file, seek_tell, - apply_read_scaling) -from .arraywriters import (make_array_writer, get_slope_inter, WriterError, - ArrayWriter) +from .volumeutils import ( + native_code, + swapped_code, + make_dt_codes, + shape_zoom_affine, + array_from_file, + seek_tell, + apply_read_scaling, +) +from .arraywriters import make_array_writer, get_slope_inter, WriterError, ArrayWriter from .wrapstruct import LabeledWrapStruct -from .spatialimages import (HeaderDataError, HeaderTypeError, - SpatialImage) +from .spatialimages import HeaderDataError, HeaderTypeError, SpatialImage from .fileholders import copy_file_map from .batteryrunners import Report from .arrayproxy import ArrayProxy @@ -105,7 +109,7 @@ ('extents', 'i4'), ('session_error', 'i2'), ('regular', 'S1'), - ('hkey_un0', 'S1') + ('hkey_un0', 'S1'), ] image_dimension_dtd = [ ('dim', 'i2', (8,)), @@ -125,7 +129,7 @@ ('compressed', 'i4'), ('verified', 'i4'), ('glmax', 'i4'), - ('glmin', 'i4') + ('glmin', 'i4'), ] data_history_dtd = [ ('descrip', 'S80'), @@ -145,12 +149,11 @@ ('omax', 'i4'), ('omin', 'i4'), ('smax', 'i4'), - ('smin', 'i4') + ('smin', 'i4'), ] # Full header numpy dtype combined across sub-fields -header_dtype = np.dtype(header_key_dtd + image_dimension_dtd + - data_history_dtd) +header_dtype = np.dtype(header_key_dtd + image_dimension_dtd + data_history_dtd) _dtdefs = ( # code, conversion function, equivalent dtype, aliases (0, 'none', np.void), @@ -161,21 +164,21 @@ (16, 'float32', np.float32), (32, 'complex64', np.complex64), # numpy complex format? 
(64, 'float64', np.float64), - (128, 'RGB', np.dtype([('R', 'u1'), - ('G', 'u1'), - ('B', 'u1')])), - (255, 'all', np.void)) + (128, 'RGB', np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')])), + (255, 'all', np.void), +) # Make full code alias bank, including dtype column data_type_codes = make_dt_codes(_dtdefs) class AnalyzeHeader(LabeledWrapStruct): - """ Class for basic analyze header + """Class for basic analyze header Implements zoom-only setting of affine transform, and no image scaling """ + # Copies of module-level definitions template_dtype = header_dtype _data_type_codes = data_type_codes @@ -190,11 +193,8 @@ class AnalyzeHeader(LabeledWrapStruct): sizeof_hdr = 348 - def __init__(self, - binaryblock=None, - endianness=None, - check=True): - """ Initialize header from binary data block + def __init__(self, binaryblock=None, endianness=None, check=True): + """Initialize header from binary data block Parameters ---------- @@ -252,7 +252,7 @@ def __init__(self, @classmethod def guessed_endian(klass, hdr): - """ Guess intended endianness from mapping-like ``hdr`` + """Guess intended endianness from mapping-like ``hdr`` Parameters ---------- @@ -335,8 +335,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): - """ Return header data for empty header with given endianness - """ + """Return header data for empty header with given endianness""" hdr_data = super(AnalyzeHeader, klass).default_structarr(endianness) hdr_data['sizeof_hdr'] = klass.sizeof_hdr hdr_data['dim'] = 1 @@ -348,7 +347,7 @@ def default_structarr(klass, endianness=None): @classmethod def from_header(klass, header=None, check=True): - """ Class method to create header from another header + """Class method to create header from another header Parameters ---------- @@ -394,9 +393,11 @@ def from_header(klass, header=None, check=True): try: obj.set_data_dtype(orig_code) except HeaderDataError: - raise HeaderDataError(f"Input header {header.__class__} has " - f"datatype {header.get_value_label('datatype')} " - f"but output header {klass} does not support it") + raise HeaderDataError( + f'Input header {header.__class__} has ' + f"datatype {header.get_value_label('datatype')} " + f'but output header {klass} does not support it' + ) obj.set_data_dtype(header.get_data_dtype()) obj.set_data_shape(header.get_data_shape()) obj.set_zooms(header.get_zooms()) @@ -405,7 +406,7 @@ def from_header(klass, header=None, check=True): return obj def _clean_after_mapping(self): - """ Set format-specific stuff after converting header from mapping + """Set format-specific stuff after converting header from mapping This routine cleans up Analyze-type headers that have had their fields set from an Analyze map returned by the ``as_analyze_map`` method. @@ -426,7 +427,7 @@ def _clean_after_mapping(self): pass def raw_data_from_fileobj(self, fileobj): - """ Read unscaled data array from `fileobj` + """Read unscaled data array from `fileobj` Parameters ---------- @@ -444,7 +445,7 @@ def raw_data_from_fileobj(self, fileobj): return array_from_file(shape, dtype, fileobj, offset) def data_from_fileobj(self, fileobj): - """ Read scaled data array from `fileobj` + """Read scaled data array from `fileobj` Use this routine to get the scaled image data from an image file `fileobj`, given a header `self`. 
"Scaled" means, with any header @@ -478,7 +479,7 @@ def data_from_fileobj(self, fileobj): return apply_read_scaling(data, slope, inter) def data_to_fileobj(self, data, fileobj, rescale=True): - """ Write `data` to `fileobj`, maybe rescaling data, modifying `self` + """Write `data` to `fileobj`, maybe rescaling data, modifying `self` In writing the data, we match the header to the written data, by setting the header scaling factors, iff `rescale` is True. Thus we @@ -512,15 +513,13 @@ def data_to_fileobj(self, data, fileobj, rescale=True): data = np.asanyarray(data) shape = self.get_data_shape() if data.shape != shape: - raise HeaderDataError('Data should be shape (%s)' % - ', '.join(str(s) for s in shape)) + raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape)) out_dtype = self.get_data_dtype() if rescale: try: - arr_writer = make_array_writer(data, - out_dtype, - self.has_data_slope, - self.has_data_intercept) + arr_writer = make_array_writer( + data, out_dtype, self.has_data_slope, self.has_data_intercept + ) except WriterError as e: raise HeaderTypeError(str(e)) else: @@ -530,7 +529,7 @@ def data_to_fileobj(self, data, fileobj, rescale=True): self.set_slope_inter(*get_slope_inter(arr_writer)) def get_data_dtype(self): - """ Get numpy dtype for data + """Get numpy dtype for data For examples see ``set_data_dtype`` """ @@ -539,7 +538,7 @@ def get_data_dtype(self): return dtype.newbyteorder(self.endianness) def set_data_dtype(self, datatype): - """ Set numpy dtype for data from code or dtype or type + """Set numpy dtype for data from code or dtype or type Examples -------- @@ -568,22 +567,19 @@ def set_data_dtype(self, datatype): try: dt = np.dtype(dt) except TypeError: - raise HeaderDataError( - f'data dtype "{datatype}" not recognized') + raise HeaderDataError(f'data dtype "{datatype}" not recognized') if dt not in self._data_type_codes: - raise HeaderDataError( - f'data dtype "{datatype}" not supported') + raise HeaderDataError(f'data dtype "{datatype}" not supported') code = self._data_type_codes[dt] dtype = self._data_type_codes.dtype[code] # test for void, being careful of user-defined types if dtype.type is np.void and not dtype.fields: - raise HeaderDataError( - f'data dtype "{datatype}" known but not supported') + raise HeaderDataError(f'data dtype "{datatype}" known but not supported') self._structarr['datatype'] = code self._structarr['bitpix'] = dtype.itemsize * 8 def get_data_shape(self): - """ Get shape of data + """Get shape of data Examples -------- @@ -602,11 +598,11 @@ def get_data_shape(self): dims = self._structarr['dim'] ndims = dims[0] if ndims == 0: - return 0, - return tuple(int(d) for d in dims[1:ndims + 1]) + return (0,) + return tuple(int(d) for d in dims[1 : ndims + 1]) def set_data_shape(self, shape): - """ Set shape of data + """Set shape of data If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -621,20 +617,20 @@ def set_data_shape(self, shape): dims[:] = 1 dims[0] = ndims try: - dims[1:ndims + 1] = shape + dims[1 : ndims + 1] = shape except (ValueError, OverflowError): # numpy 1.4.1 at least generates a ValueError from trying to set a # python long into an int64 array (dims are int64 for nifti2) values_fit = False else: - values_fit = np.all(dims[1:ndims + 1] == shape) + values_fit = np.all(dims[1 : ndims + 1] == shape) # Error if we did not succeed setting dimensions if not values_fit: raise HeaderDataError(f'shape {shape} does not fit in dim datatype') - 
self._structarr['pixdim'][ndims + 1:] = 1.0 + self._structarr['pixdim'][ndims + 1 :] = 1.0 def get_base_affine(self): - """ Get affine from basic (shared) header fields + """Get affine from basic (shared) header fields Note that we get the translations from the center of the image. @@ -655,14 +651,14 @@ def get_base_affine(self): hdr = self._structarr dims = hdr['dim'] ndim = dims[0] - return shape_zoom_affine(hdr['dim'][1:ndim + 1], - hdr['pixdim'][1:ndim + 1], - self.default_x_flip) + return shape_zoom_affine( + hdr['dim'][1 : ndim + 1], hdr['pixdim'][1 : ndim + 1], self.default_x_flip + ) get_best_affine = get_base_affine def get_zooms(self): - """ Get zooms from header + """Get zooms from header Returns ------- @@ -687,10 +683,10 @@ def get_zooms(self): if ndim == 0: return (1.0,) pixdims = hdr['pixdim'] - return tuple(pixdims[1:ndim + 1]) + return tuple(pixdims[1 : ndim + 1]) def set_zooms(self, zooms): - """ Set zooms into header fields + """Set zooms into header fields See docstring for ``get_zooms`` for examples """ @@ -699,15 +695,14 @@ def set_zooms(self, zooms): ndim = dims[0] zooms = np.asarray(zooms) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' - % (ndim, ndim)) + raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) if np.any(zooms < 0): raise HeaderDataError('zooms must be positive') pixdims = hdr['pixdim'] - pixdims[1:ndim + 1] = zooms[:] + pixdims[1 : ndim + 1] = zooms[:] def as_analyze_map(self): - """ Return header as mapping for conversion to Analyze types + """Return header as mapping for conversion to Analyze types Collect data from custom header type to fill in fields for Analyze and derived header types (such as Nifti1 and Nifti2). @@ -746,12 +741,11 @@ def as_analyze_map(self): return self def set_data_offset(self, offset): - """ Set offset into data file to read data - """ + """Set offset into data file to read data""" self._structarr['vox_offset'] = offset def get_data_offset(self): - """ Return offset into data file to read data + """Return offset into data file to read data Examples -------- @@ -765,14 +759,14 @@ def get_data_offset(self): return int(self._structarr['vox_offset']) def get_slope_inter(self): - """ Get scalefactor and intercept + """Get scalefactor and intercept These are not implemented for basic Analyze """ return None, None def set_slope_inter(self, slope, inter=None): - """ Set slope and / or intercept into header + """Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -789,19 +783,14 @@ def set_slope_inter(self, slope, inter=None): inter : None or float, optional If float, value must be 0.0 or we raise a ``HeaderTypeError`` """ - if ((slope in (None, 1) or np.isnan(slope)) and - (inter in (None, 0) or np.isnan(inter))): + if (slope in (None, 1) or np.isnan(slope)) and (inter in (None, 0) or np.isnan(inter)): return - raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 ' - 'for Analyze headers') + raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 ' 'for Analyze headers') @classmethod def _get_checks(klass): - """ Return sequence of check functions for this class """ - return (klass._chk_sizeof_hdr, - klass._chk_datatype, - klass._chk_bitpix, - klass._chk_pixdims) + """Return sequence of check functions for this class""" + return (klass._chk_sizeof_hdr, klass._chk_datatype, klass._chk_bitpix, klass._chk_pixdims) """ Check functions in format 
expected by BatteryRunner class """ @@ -893,15 +882,16 @@ def may_contain_header(klass, binaryblock): if len(binaryblock) < klass.sizeof_hdr: return False - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, - buffer=binaryblock[:klass.sizeof_hdr]) + hdr_struct = np.ndarray( + shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr] + ) bs_hdr_struct = hdr_struct.byteswap() return 348 in (hdr_struct['sizeof_hdr'], bs_hdr_struct['sizeof_hdr']) class AnalyzeImage(SpatialImage): - """ Class for basic Analyze format image - """ + """Class for basic Analyze format image""" + header_class = AnalyzeHeader _meta_sniff_len = header_class.sizeof_hdr files_types = (('image', '.img'), ('header', '.hdr')) @@ -913,16 +903,15 @@ class AnalyzeImage(SpatialImage): ImageArrayProxy = ArrayProxy - def __init__(self, dataobj, affine, header=None, - extra=None, file_map=None, dtype=None): - super(AnalyzeImage, self).__init__( - dataobj, affine, header, extra, file_map) + def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None): + super(AnalyzeImage, self).__init__(dataobj, affine, header, extra, file_map) # Reset consumable values self._header.set_data_offset(0) self._header.set_slope_inter(None, None) if dtype is not None: self.set_data_dtype(dtype) + __init__.__doc__ = SpatialImage.__init__.__doc__ def get_data_dtype(self): @@ -933,7 +922,7 @@ def set_data_dtype(self, dtype): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Class method to create image from mapping in ``file_map`` + """Class method to create image from mapping in ``file_map`` Parameters ---------- @@ -971,20 +960,21 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): imgf = img_fh.fileobj if imgf is None: imgf = img_fh.filename - data = klass.ImageArrayProxy(imgf, hdr_copy, mmap=mmap, - keep_file_open=keep_file_open) + data = klass.ImageArrayProxy(imgf, hdr_copy, mmap=mmap, keep_file_open=keep_file_open) # Initialize without affine to allow header to pass through unmodified img = klass(data, None, header, file_map=file_map) # set affine from header though img._affine = header.get_best_affine() - img._load_cache = {'header': hdr_copy, - 'affine': img._affine.copy(), - 'file_map': copy_file_map(file_map)} + img._load_cache = { + 'header': hdr_copy, + 'affine': img._affine.copy(), + 'file_map': copy_file_map(file_map), + } return img @staticmethod def _get_fileholders(file_map): - """ Return fileholder for header and image + """Return fileholder for header and image Allows single-file image types to return one fileholder for both types. 
For Analyze there are two fileholders, one for the header, one for the @@ -993,7 +983,7 @@ def _get_fileholders(file_map): return file_map['header'], file_map['image'] def to_file_map(self, file_map=None, dtype=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Parameters ---------- @@ -1022,10 +1012,9 @@ def to_file_map(self, file_map=None, dtype=None): scale_me = np.all(np.isnan((slope, inter))) try: if scale_me: - arr_writer = make_array_writer(data, - out_dtype, - hdr.has_data_slope, - hdr.has_data_intercept) + arr_writer = make_array_writer( + data, out_dtype, hdr.has_data_slope, hdr.has_data_intercept + ) else: arr_writer = ArrayWriter(data, out_dtype, check_scaling=False) except WriterError: diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index dc9b171c0b..bb97b8efb0 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Array proxy base class +"""Array proxy base class The proxy API is - at minimum: @@ -55,7 +55,7 @@ class ArrayProxy: - """ Class to act as proxy for the array that can be read from a file + """Class to act as proxy for the array that can be read from a file The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. @@ -84,6 +84,7 @@ class ArrayProxy: See :mod:`nibabel.minc1`, :mod:`nibabel.ecat` and :mod:`nibabel.parrec` for examples. """ + _default_order = 'F' def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=None): @@ -138,25 +139,30 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non self.file_like = file_like if hasattr(spec, 'get_data_shape'): slope, inter = spec.get_slope_inter() - par = (spec.get_data_shape(), - spec.get_data_dtype(), - spec.get_data_offset(), - 1. if slope is None else slope, - 0. if inter is None else inter) + par = ( + spec.get_data_shape(), + spec.get_data_dtype(), + spec.get_data_offset(), + 1.0 if slope is None else slope, + 0.0 if inter is None else inter, + ) elif 2 <= len(spec) <= 5: - optional = (0, 1., 0.) - par = spec + optional[len(spec) - 2:] + optional = (0, 1.0, 0.0) + par = spec + optional[len(spec) - 2 :] else: raise TypeError('spec must be tuple of length 2-5 or header object') # Warn downstream users that the class variable order is going away if hasattr(self.__class__, 'order'): - warnings.warn(f'Class {self.__class__} has an `order` class variable. ' - 'ArrayProxy subclasses should rename this variable to `_default_order` ' - 'to avoid conflict with instance variables.\n' - '* deprecated in version: 5.0\n' - '* will raise error in version: 7.0\n', - DeprecationWarning, stacklevel=2) + warnings.warn( + f'Class {self.__class__} has an `order` class variable. ' + 'ArrayProxy subclasses should rename this variable to `_default_order` ' + 'to avoid conflict with instance variables.\n' + '* deprecated in version: 5.0\n' + '* will raise error in version: 7.0\n', + DeprecationWarning, + stacklevel=2, + ) # Override _default_order with order, to follow intent of subclasser self._default_order = self.order @@ -170,8 +176,9 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non self.order = order # Flags to keep track of whether a single ImageOpener is created, and # whether a single underlying file handle is created. 
- self._keep_file_open, self._persist_opener = \ - self._should_keep_file_open(file_like, keep_file_open) + self._keep_file_open, self._persist_opener = self._should_keep_file_open( + file_like, keep_file_open + ) self._lock = RLock() def __del__(self): @@ -183,13 +190,13 @@ def __del__(self): self._opener = None def __getstate__(self): - """Returns the state of this ``ArrayProxy`` during pickling. """ + """Returns the state of this ``ArrayProxy`` during pickling.""" state = self.__dict__.copy() state.pop('_lock', None) return state def __setstate__(self, state): - """Sets the state of this ``ArrayProxy`` during unpickling. """ + """Sets the state of this ``ArrayProxy`` during unpickling.""" self.__dict__.update(state) self._lock = RLock() @@ -260,8 +267,10 @@ def _should_keep_file_open(self, file_like, keep_file_open): if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT if keep_file_open not in (True, False): - raise ValueError("nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT " - f"must be boolean. Found: {keep_file_open}") + raise ValueError( + 'nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT ' + f'must be boolean. Found: {keep_file_open}' + ) elif keep_file_open not in (True, False): raise ValueError('keep_file_open must be one of {None, True, False}') @@ -317,32 +326,35 @@ def _get_fileobj(self): """ if self._persist_opener: if not hasattr(self, '_opener'): - self._opener = openers.ImageOpener( - self.file_like, keep_open=self._keep_file_open) + self._opener = openers.ImageOpener(self.file_like, keep_open=self._keep_file_open) yield self._opener else: - with openers.ImageOpener( - self.file_like, keep_open=False) as opener: + with openers.ImageOpener(self.file_like, keep_open=False) as opener: yield opener def _get_unscaled(self, slicer): - if canonical_slicers(slicer, self._shape, False) == \ - canonical_slicers((), self._shape, False): + if canonical_slicers(slicer, self._shape, False) == canonical_slicers( + (), self._shape, False + ): with self._get_fileobj() as fileobj, self._lock: - return array_from_file(self._shape, - self._dtype, - fileobj, - offset=self._offset, - order=self.order, - mmap=self._mmap) + return array_from_file( + self._shape, + self._dtype, + fileobj, + offset=self._offset, + order=self.order, + mmap=self._mmap, + ) with self._get_fileobj() as fileobj: - return fileslice(fileobj, - slicer, - self._shape, - self._dtype, - self._offset, - order=self.order, - lock=self._lock) + return fileslice( + fileobj, + slicer, + self._shape, + self._dtype, + self._offset, + order=self.order, + lock=self._lock, + ) def _get_scaled(self, dtype, slicer): # Ensure scale factors have dtypes @@ -361,14 +373,14 @@ def _get_scaled(self, dtype, slicer): return scaled def get_unscaled(self): - """ Read data from file + """Read data from file This is an optional part of the proxy API """ return self._get_unscaled(slicer=()) def __array__(self, dtype=None): - """ Read data from file and apply scaling, casting to ``dtype`` + """Read data from file and apply scaling, casting to ``dtype`` If ``dtype`` is unspecified, the dtype of the returned array is the narrowest dtype that can represent the data without overflow. 
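As a quick orientation to the proxy API these hunks reformat, here is a minimal sketch (illustrative only, not part of this patch) using the 5-tuple spec form ``(shape, dtype, offset, slope, inter)`` accepted above, with an in-memory file:

    import numpy as np
    from io import BytesIO
    from nibabel.arrayproxy import ArrayProxy

    # Serialize a small array in Fortran order, the proxy's default order
    raw = np.arange(24, dtype=np.int16).reshape(2, 3, 4)
    fobj = BytesIO(raw.tobytes(order='F'))
    # spec tuple: (shape, dtype, offset, slope, inter)
    proxy = ArrayProxy(fobj, (raw.shape, np.dtype(np.int16), 0, 2.0, 1.0))
    # __array__ reads from the file and applies raw * slope + inter
    assert np.allclose(np.asarray(proxy), raw * 2.0 + 1.0)
    # __getitem__ slices lazily, reading and scaling only the requested part
    assert np.allclose(proxy[0, 1], raw[0, 1] * 2.0 + 1.0)
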
@@ -397,31 +409,32 @@ def __getitem__(self, slicer): return self._get_scaled(dtype=None, slicer=slicer) def reshape(self, shape): - """ Return an ArrayProxy with a new shape, without modifying data """ + """Return an ArrayProxy with a new shape, without modifying data""" size = np.prod(self._shape) # Calculate new shape if not fully specified from operator import mul from functools import reduce + n_unknowns = len([e for e in shape if e == -1]) if n_unknowns > 1: - raise ValueError("can only specify one unknown dimension") + raise ValueError('can only specify one unknown dimension') elif n_unknowns == 1: known_size = reduce(mul, shape, -1) unknown_size = size // known_size shape = tuple(unknown_size if e == -1 else e for e in shape) if np.prod(shape) != size: - raise ValueError(f"cannot reshape array of size {size:d} into shape {shape!s}") - return self.__class__(file_like=self.file_like, - spec=(shape, self._dtype, self._offset, - self._slope, self._inter), - mmap=self._mmap) + raise ValueError(f'cannot reshape array of size {size:d} into shape {shape!s}') + return self.__class__( + file_like=self.file_like, + spec=(shape, self._dtype, self._offset, self._slope, self._inter), + mmap=self._mmap, + ) def is_proxy(obj): - """ Return True if `obj` is an array proxy - """ + """Return True if `obj` is an array proxy""" try: return obj.is_proxy except AttributeError: @@ -429,19 +442,17 @@ def is_proxy(obj): def reshape_dataobj(obj, shape): - """ Use `obj` reshape method if possible, else numpy reshape function - """ - return (obj.reshape(shape) if hasattr(obj, 'reshape') - else np.reshape(obj, shape)) + """Use `obj` reshape method if possible, else numpy reshape function""" + return obj.reshape(shape) if hasattr(obj, 'reshape') else np.reshape(obj, shape) def get_obj_dtype(obj): - """ Get the effective dtype of an array-like object """ + """Get the effective dtype of an array-like object""" if is_proxy(obj): # Read and potentially apply scaling to one value idx = (0,) * len(obj.shape) return obj[idx].dtype - elif hasattr(obj, "dtype"): + elif hasattr(obj, 'dtype'): # Trust the dtype (probably an ndarray) return obj.dtype else: diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index c2bbb2912c..1a80bcfa98 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -1,4 +1,4 @@ -""" Array writer objects +"""Array writer objects Array writers have init signature:: @@ -31,8 +31,15 @@ def __init__(self, array, out_dtype=None) import numpy as np -from .casting import (int_to_float, as_int, int_abs, type_info, floor_exact, - best_float, shared_range) +from .casting import ( + int_to_float, + as_int, + int_abs, + type_info, + floor_exact, + best_float, + shared_range, +) from .volumeutils import finite_range, array_to_file @@ -45,9 +52,8 @@ class ScalingError(WriterError): class ArrayWriter: - def __init__(self, array, out_dtype=None, **kwargs): - r""" Initialize array writer + r"""Initialize array writer Parameters ---------- @@ -92,10 +98,10 @@ def __init__(self, array, out_dtype=None, **kwargs): self._has_nan = None self._nan2zero = nan2zero if check_scaling and self.scaling_needed(): - raise WriterError("Scaling needed but cannot scale") + raise WriterError('Scaling needed but cannot scale') def scaling_needed(self): - """ Checks if scaling is needed for input array + """Checks if scaling is needed for input array Raises WriterError if no scaling possible. 
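To make the ``ArrayWriter`` contract above concrete, a small usage sketch (illustrative, not part of this patch): the plain writer refuses conversions that would need scaling, but writes a float-to-float cast directly:

    import numpy as np
    from io import BytesIO
    from nibabel.arraywriters import ArrayWriter, WriterError

    arr = np.array([0.5, 1.5, 2.5])
    try:
        ArrayWriter(arr, np.int16)  # float -> int needs scaling; not available here
    except WriterError as e:
        print('refused:', e)

    aw = ArrayWriter(arr, np.float32)  # float -> float casts without scaling
    fobj = BytesIO()
    aw.to_fileobj(fobj)
    assert np.allclose(np.frombuffer(fobj.getvalue(), dtype=np.float32), arr)
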
@@ -155,18 +161,17 @@ def scaling_needed(self): @property def array(self): - """ Return array from arraywriter """ + """Return array from arraywriter""" return self._array @property def out_dtype(self): - """ Return `out_dtype` from arraywriter """ + """Return `out_dtype` from arraywriter""" return self._out_dtype @property def has_nan(self): - """ True if array has NaNs - """ + """True if array has NaNs""" # Structured types raise an error for finite range; don't run finite # range unless we have to. if self._has_nan is None: @@ -177,7 +182,7 @@ def has_nan(self): return self._has_nan def finite_range(self): - """ Return (maybe cached) finite range of data array """ + """Return (maybe cached) finite range of data array""" if self._finite_range is None: mn, mx, has_nan = finite_range(self._array, True) self._finite_range = (mn, mx) @@ -185,14 +190,16 @@ def finite_range(self): return self._finite_range def _needs_nan2zero(self): - """ True if nan2zero check needed for writing array """ - return (self._nan2zero and - self._array.dtype.kind in 'fc' and - self.out_dtype.kind in 'iu' and - self.has_nan) + """True if nan2zero check needed for writing array""" + return ( + self._nan2zero + and self._array.dtype.kind in 'fc' + and self.out_dtype.kind in 'iu' + and self.has_nan + ) def to_fileobj(self, fileobj, order='F'): - """ Write array into `fileobj` + """Write array into `fileobj` Parameters ---------- @@ -200,18 +207,20 @@ def to_fileobj(self, fileobj, order='F'): order : {'F', 'C'} order (Fortran or C) to which to write array """ - array_to_file(self._array, - fileobj, - self._out_dtype, - offset=None, - mn=None, - mx=None, - order=order, - nan2zero=self._needs_nan2zero()) + array_to_file( + self._array, + fileobj, + self._out_dtype, + offset=None, + mn=None, + mx=None, + order=order, + nan2zero=self._needs_nan2zero(), + ) class SlopeArrayWriter(ArrayWriter): - """ ArrayWriter that can use scalefactor for writing arrays + """ArrayWriter that can use scalefactor for writing arrays The scalefactor allows the array writer to write floats to int output types, and rescale larger ints to smaller. It can therefore lose @@ -227,9 +236,8 @@ class SlopeArrayWriter(ArrayWriter): * calc_scale() - calculate slope to best write self.array """ - def __init__(self, array, out_dtype=None, calc_scale=True, - scaler_dtype=np.float32, **kwargs): - r""" Initialize array writer + def __init__(self, array, out_dtype=None, calc_scale=True, scaler_dtype=np.float32, **kwargs): + r"""Initialize array writer Parameters ---------- @@ -286,7 +294,7 @@ def __init__(self, array, out_dtype=None, calc_scale=True, self.calc_scale() def scaling_needed(self): - """ Checks if scaling is needed for input array + """Checks if scaling is needed for input array Raises WriterError if no scaling possible. 
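A sketch of the slope-only path (illustrative values, not from the patch): the scalefactor that ``calc_scale`` picks lets integer storage round-trip floating point data to within one scaling step:

    import numpy as np
    from io import BytesIO
    from nibabel.arraywriters import SlopeArrayWriter

    arr = np.array([-1000.5, 0.0, 1000.5])
    aw = SlopeArrayWriter(arr, np.int16)  # calc_scale runs by default
    fobj = BytesIO()
    aw.to_fileobj(fobj)  # writes rounded arr / slope as int16
    stored = np.frombuffer(fobj.getvalue(), dtype=np.int16)
    recovered = stored * aw.slope  # a reader undoes the divide-by-slope
    assert np.allclose(recovered, arr, atol=abs(aw.slope))
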
@@ -312,7 +320,7 @@ def scaling_needed(self): return (mn, mx) != (np.inf, -np.inf) def reset(self): - """ Set object to values before any scaling calculation """ + """Set object to values before any scaling calculation""" self.slope = 1.0 self._finite_range = None self._scale_calced = False @@ -322,11 +330,11 @@ def _get_slope(self): def _set_slope(self, val): self._slope = np.squeeze(self.scaler_dtype.type(val)) + slope = property(_get_slope, _set_slope, None, 'get/set slope') def calc_scale(self, force=False): - """ Calculate / set scaling for floats/(u)ints to (u)ints - """ + """Calculate / set scaling for floats/(u)ints to (u)ints""" # If we've run already, return unless told otherwise if not force and self._scale_calced: return @@ -337,7 +345,7 @@ def calc_scale(self, force=False): self._scale_calced = True def _writing_range(self): - """ Finite range for thresholding on write """ + """Finite range for thresholding on write""" if self._out_dtype.kind in 'iu' and self._array.dtype.kind == 'f': mn, mx = self.finite_range() if (mn, mx) == (np.inf, -np.inf): # no finite data @@ -346,7 +354,7 @@ def _writing_range(self): return None, None def to_fileobj(self, fileobj, order='F'): - """ Write array into `fileobj` + """Write array into `fileobj` Parameters ---------- @@ -355,15 +363,17 @@ def to_fileobj(self, fileobj, order='F'): order (Fortran or C) to which to write array """ mn, mx = self._writing_range() - array_to_file(self._array, - fileobj, - self._out_dtype, - offset=None, - divslope=self.slope, - mn=mn, - mx=mx, - order=order, - nan2zero=self._needs_nan2zero()) + array_to_file( + self._array, + fileobj, + self._out_dtype, + offset=None, + divslope=self.slope, + mn=mn, + mx=mx, + order=order, + nan2zero=self._needs_nan2zero(), + ) def _do_scaling(self): arr = self._array @@ -383,7 +393,7 @@ def _do_scaling(self): out_max, out_min = info.max, info.min # If left as int64, uint64, comparisons will default to floats, and # these are inexact for > 2**53 - so convert to int - if (as_int(mx) <= as_int(out_max) and as_int(mn) >= as_int(out_min)): + if as_int(mx) <= as_int(out_max) and as_int(mn) >= as_int(out_min): # already in range return # (u)int to (u)int scaling @@ -408,7 +418,7 @@ def _iu2iu(self): self._range_scale(mn, mx) def _range_scale(self, in_min, in_max): - """ Calculate scaling based on data range and output type """ + """Calculate scaling based on data range and output type""" out_dtype = self._out_dtype info = type_info(out_dtype) out_min, out_max = info['min'], info['max'] @@ -418,12 +428,12 @@ def _range_scale(self, in_min, in_max): # not lose precision because min/max are of fp type. 
out_min, out_max = np.array((out_min, out_max), dtype=big_float) else: # (u)int - out_min, out_max = [int_to_float(v, big_float) - for v in (out_min, out_max)] + out_min, out_max = [int_to_float(v, big_float) for v in (out_min, out_max)] if self._out_dtype.kind == 'u': if in_min < 0 and in_max > 0: - raise WriterError('Cannot scale negative and positive ' - 'numbers to uint without intercept') + raise WriterError( + 'Cannot scale negative and positive ' 'numbers to uint without intercept' + ) if in_max <= 0: # All input numbers <= 0 self.slope = in_min / out_max else: # All input numbers > 0 @@ -438,7 +448,7 @@ def _range_scale(self, in_min, in_max): class SlopeInterArrayWriter(SlopeArrayWriter): - """ Array writer that can use slope and intercept to scale array + """Array writer that can use slope and intercept to scale array The writer can subtract an intercept, and divided by a slope, in order to be able to convert floating point values into a (u)int range, or to convert @@ -455,9 +465,8 @@ class SlopeInterArrayWriter(SlopeArrayWriter): * calc_scale() - calculate inter, slope to best write self.array """ - def __init__(self, array, out_dtype=None, calc_scale=True, - scaler_dtype=np.float32, **kwargs): - r""" Initialize array writer + def __init__(self, array, out_dtype=None, calc_scale=True, scaler_dtype=np.float32, **kwargs): + r"""Initialize array writer Parameters ---------- @@ -498,14 +507,12 @@ def __init__(self, array, out_dtype=None, calc_scale=True, >>> (aw.slope, aw.inter) == (1.0, 128) True """ - super(SlopeInterArrayWriter, self).__init__(array, - out_dtype, - calc_scale, - scaler_dtype, - **kwargs) + super(SlopeInterArrayWriter, self).__init__( + array, out_dtype, calc_scale, scaler_dtype, **kwargs + ) def reset(self): - """ Set object to values before any scaling calculation """ + """Set object to values before any scaling calculation""" super(SlopeInterArrayWriter, self).reset() self.inter = 0.0 @@ -514,10 +521,11 @@ def _get_inter(self): def _set_inter(self, val): self._inter = np.squeeze(self.scaler_dtype.type(val)) + inter = property(_get_inter, _set_inter, None, 'get/set inter') def to_fileobj(self, fileobj, order='F'): - """ Write array into `fileobj` + """Write array into `fileobj` Parameters ---------- @@ -526,16 +534,18 @@ def to_fileobj(self, fileobj, order='F'): order (Fortran or C) to which to write array """ mn, mx = self._writing_range() - array_to_file(self._array, - fileobj, - self._out_dtype, - offset=None, - intercept=self.inter, - divslope=self.slope, - mn=mn, - mx=mx, - order=order, - nan2zero=self._needs_nan2zero()) + array_to_file( + self._array, + fileobj, + self._out_dtype, + offset=None, + intercept=self.inter, + divslope=self.slope, + mn=mn, + mx=mx, + order=order, + nan2zero=self._needs_nan2zero(), + ) def _iu2iu(self): # (u)int to (u)int @@ -546,8 +556,7 @@ def _iu2iu(self): # Options in this method are scaling using intercept only. These will # have to pass through ``self.scaler_dtype`` (because the intercept is # in this type). - o_min, o_max = [as_int(v) - for v in shared_range(self.scaler_dtype, out_dtype)] + o_min, o_max = [as_int(v) for v in shared_range(self.scaler_dtype, out_dtype)] type_range = o_max - o_min mn2mx = mx - mn if mn2mx <= type_range: # might offset be enough? 
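A sketch of the offset-only case that ``_iu2iu`` above handles (illustrative values, not from the patch): an int16 range of width 255 fits uint8 exactly once the intercept shifts it, with no slope needed:

    import numpy as np
    from io import BytesIO
    from nibabel.arraywriters import SlopeInterArrayWriter

    arr = np.array([-128, -1, 127], dtype=np.int16)  # range width 255 fits uint8
    aw = SlopeInterArrayWriter(arr, np.uint8)
    fobj = BytesIO()
    aw.to_fileobj(fobj)
    stored = np.frombuffer(fobj.getvalue(), dtype=np.uint8)
    recovered = stored * aw.slope + aw.inter  # standard slope/inter read scaling
    assert np.allclose(recovered, arr, atol=1)
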
@@ -573,10 +582,9 @@ def _iu2iu(self): super(SlopeInterArrayWriter, self)._iu2iu() def _range_scale(self, in_min, in_max): - """ Calculate scaling, intercept based on data range and output type - """ + """Calculate scaling, intercept based on data range and output type""" if in_max == in_min: # Only one number in array - self.slope = 1. + self.slope = 1.0 self.inter = in_min return big_float = best_float() @@ -596,8 +604,7 @@ def _range_scale(self, in_min, in_max): in_min, in_max = as_int(in_min), as_int(in_max) in_range = int_to_float(in_max - in_min, big_float) # Cast to float for later processing. - in_min, in_max = [int_to_float(v, big_float) - for v in (in_min, in_max)] + in_min, in_max = [int_to_float(v, big_float) for v in (in_min, in_max)] if out_dtype.kind == 'f': # Type range, these are also floats info = type_info(out_dtype) @@ -676,7 +683,7 @@ def _range_scale(self, in_min, in_max): self.inter = inter self.slope = slope if not np.all(np.isfinite([self.slope, self.inter])): - raise ScalingError("Slope / inter not both finite") + raise ScalingError('Slope / inter not both finite') # Check nan fill value if not (0 in (in_min, in_max) and self._nan2zero and self.has_nan): return @@ -691,7 +698,7 @@ def _range_scale(self, in_min, in_max): def get_slope_inter(writer): - """ Return slope, intercept from array writer object + """Return slope, intercept from array writer object Parameters ---------- @@ -725,9 +732,8 @@ def get_slope_inter(writer): return slope, inter -def make_array_writer(data, out_type, has_slope=True, has_intercept=True, - **kwargs): - r""" Make array writer instance for array `data` and output type `out_type` +def make_array_writer(data, out_type, has_slope=True, has_intercept=True, **kwargs): + r"""Make array writer instance for array `data` and output type `out_type` Parameters ---------- diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index a860ba3778..50650b1647 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Battery runner classes and Report classes +"""Battery runner classes and Report classes These classes / objects are for generic checking / fixing batteries @@ -104,15 +104,14 @@ def chk_pixdims(hdr, fix=True): hdr['pixdim'][1:4] = np.abs(hdr['pixdim'][1:4]) rep.fix_msg = 'setting to abs of pixdim values' return hdr, rep - """ class BatteryRunner: - """ Class to run set of checks """ + """Class to run set of checks""" def __init__(self, checks): - """ Initialize instance from sequence of `checks` + """Initialize instance from sequence of `checks` Parameters ---------- @@ -130,7 +129,7 @@ def __init__(self, checks): self._checks = checks def check_only(self, obj): - """ Run checks on `obj` returning reports + """Run checks on `obj` returning reports Parameters ---------- @@ -150,7 +149,7 @@ def check_only(self, obj): return reports def check_fix(self, obj): - """ Run checks, with fixes, on `obj` returning `obj`, reports + """Run checks, with fixes, on `obj` returning `obj`, reports Parameters ---------- @@ -175,13 +174,8 @@ def __len__(self): class Report: - - def __init__(self, - error=Exception, - problem_level=0, - problem_msg='', - fix_msg=''): - """ Initialize report with values + def __init__(self, error=Exception, problem_level=0, problem_msg='', fix_msg=''): + """Initialize report with values Parameters ---------- @@ -214,7 +208,7 @@ def __init__(self, self.fix_msg = fix_msg def __getstate__(self): - """ State that defines object + """State that defines object Returns ------- @@ -223,7 +217,7 @@ def __getstate__(self): return self.error, self.problem_level, self.problem_msg, self.fix_msg def __eq__(self, other): - """ are two BatteryRunner-like objects equal? + """are two BatteryRunner-like objects equal? Parameters ---------- @@ -243,26 +237,25 @@ def __eq__(self, other): return self.__getstate__() == other.__getstate__() def __ne__(self, other): - """ are two BatteryRunner-like objects not equal? + """are two BatteryRunner-like objects not equal? 
See docstring for __eq__ """ return not self == other def __str__(self): - """ Printable string for object """ + """Printable string for object""" return self.__dict__.__str__() @property def message(self): - """ formatted message string, including fix message if present - """ + """formatted message string, including fix message if present""" if self.fix_msg: return '; '.join((self.problem_msg, self.fix_msg)) return self.problem_msg def log_raise(self, logger, error_level=40): - """ Log problem, raise error if problem >= `error_level` + """Log problem, raise error if problem >= `error_level` Parameters ---------- @@ -277,7 +270,7 @@ def log_raise(self, logger, error_level=40): raise self.error(self.problem_msg) def write_raise(self, stream, error_level=40, log_level=30): - """ Write report to `stream` + """Write report to `stream` Parameters ---------- diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index ee0d25044d..7b59fbcaec 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -1,4 +1,4 @@ -""" Benchmarks for array_to_file routine +"""Benchmarks for array_to_file routine Run benchmarks with:: @@ -28,7 +28,7 @@ def bench_array_to_file(): img_shape = (128, 128, 64, 10) arr = rng.normal(size=img_shape) sys.stdout.flush() - print_git_title("\nArray to file") + print_git_title('\nArray to file') mtime = measure('array_to_file(arr, BytesIO(), np.float32)', repeat) print('%30s %6.2f' % ('Save float64 to float32', mtime)) mtime = measure('array_to_file(arr, BytesIO(), np.int16)', repeat) diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index fb037eec29..71ea801756 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -124,7 +124,7 @@ def fmt_sliceobj(sliceobj): results = [] # We use the same random seed for each slice object, - seeds = [np.random.randint(0, 2 ** 32) for s in SLICEOBJS] + seeds = [np.random.randint(0, 2**32) for s in SLICEOBJS] for ti, test in enumerate(tests): @@ -144,8 +144,7 @@ def basefunc(): img.dataobj[fix_sliceobj(sliceobj)] def testfunc(): - with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', - have_igzip): + with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', have_igzip): imggz.dataobj[fix_sliceobj(sliceobj)] # make sure nothing is floating around from the previous test @@ -167,8 +166,7 @@ def testfunc(): np.random.seed(seed) basetime = float(timeit(basefunc, number=NITERS)) / float(NITERS) - results.append((label, keep_open, sliceobj, testtime, basetime, - testmem, basemem)) + results.append((label, keep_open, sliceobj, testtime, basetime, testmem, basemem)) data = np.zeros((len(results), 4)) data[:, 0] = [r[3] for r in results] diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py index 1c531f9113..59b6aa9314 100644 --- a/nibabel/benchmarks/bench_fileslice.py +++ b/nibabel/benchmarks/bench_fileslice.py @@ -1,4 +1,4 @@ -""" Benchmarks for fileslicing +"""Benchmarks for fileslicing import nibabel as nib nib.bench() @@ -22,20 +22,13 @@ SHAPE = (64, 64, 32, 100) ROW_NAMES = [f'axis {i}, len {dim}' for i, dim in enumerate(SHAPE)] -COL_NAMES = ['mid int', - 'step 1', - 'half step 1', - 'step mid int'] -HAVE_ZSTD = optional_package("pyzstd")[1] +COL_NAMES = ['mid int', 'step 1', 'half step 1', 'step mid int'] +HAVE_ZSTD = optional_package('pyzstd')[1] def _slices_for_len(L): # Example slices for a dimension of 
length L - return ( - L // 2, - slice(None, None, 1), - slice(None, L // 2, 1), - slice(None, None, L // 2)) + return (L // 2, slice(None, None, 1), slice(None, L // 2, 1), slice(None, None, L // 2)) def run_slices(file_like, repeat=3, offset=0, order='F'): @@ -53,63 +46,48 @@ def run_slices(file_like, repeat=3, offset=0, order='F'): sliceobj[i] = slicer def f(): - fileslice(fobj, - tuple(sliceobj), - arr.shape, - arr.dtype, - offset, - order) + fileslice(fobj, tuple(sliceobj), arr.shape, arr.dtype, offset, order) + times_arr[i, j] = timeit(f, number=repeat) def g(): fobj.seek(offset) data = fobj.read() np.ndarray(SHAPE, arr.dtype, buffer=data, order=order) + base_time = timeit(g, number=repeat) return times_arr, base_time -def bench_fileslice(bytes=True, - file_=True, - gz=True, - bz2=False, - zst=True): +def bench_fileslice(bytes=True, file_=True, gz=True, bz2=False, zst=True): sys.stdout.flush() repeat = 2 def my_table(title, times, base): print() - print(rst_table(times, ROW_NAMES, COL_NAMES, title, - val_fmt='{0[0]:3.2f} ({0[1]:3.2f})')) + print(rst_table(times, ROW_NAMES, COL_NAMES, title, val_fmt='{0[0]:3.2f} ({0[1]:3.2f})')) print(f'Base time: {base:3.2f}') + if bytes: fobj = BytesIO() times, base = run_slices(fobj, repeat) - my_table('Bytes slice - raw (ratio)', - np.dstack((times, times / base)), - base) + my_table('Bytes slice - raw (ratio)', np.dstack((times, times / base)), base) if file_: with InTemporaryDirectory(): file_times, file_base = run_slices('data.bin', repeat) - my_table('File slice - raw (ratio)', - np.dstack((file_times, file_times / file_base)), - file_base) + my_table( + 'File slice - raw (ratio)', np.dstack((file_times, file_times / file_base)), file_base + ) if gz: with InTemporaryDirectory(): gz_times, gz_base = run_slices('data.gz', repeat) - my_table('gz slice - raw (ratio)', - np.dstack((gz_times, gz_times / gz_base)), - gz_base) + my_table('gz slice - raw (ratio)', np.dstack((gz_times, gz_times / gz_base)), gz_base) if bz2: with InTemporaryDirectory(): bz2_times, bz2_base = run_slices('data.bz2', repeat) - my_table('bz2 slice - raw (ratio)', - np.dstack((bz2_times, bz2_times / bz2_base)), - bz2_base) + my_table('bz2 slice - raw (ratio)', np.dstack((bz2_times, bz2_times / bz2_base)), bz2_base) if zst and HAVE_ZSTD: with InTemporaryDirectory(): zst_times, zst_base = run_slices('data.zst', repeat) - my_table('zst slice - raw (ratio)', - np.dstack((zst_times, zst_times / zst_base)), - zst_base) + my_table('zst slice - raw (ratio)', np.dstack((zst_times, zst_times / zst_base)), zst_base) sys.stdout.flush() diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py index 1ca2bf95d0..0a6ff576fa 100644 --- a/nibabel/benchmarks/bench_finite_range.py +++ b/nibabel/benchmarks/bench_finite_range.py @@ -1,4 +1,4 @@ -""" Benchmarks for finite_range routine +"""Benchmarks for finite_range routine Run benchmarks with:: @@ -28,7 +28,7 @@ def bench_finite_range(): img_shape = (128, 128, 64, 10) arr = rng.normal(size=img_shape) sys.stdout.flush() - print_git_title("\nFinite range") + print_git_title('\nFinite range') mtime = measure('finite_range(arr)', repeat) print('%30s %6.2f' % ('float64 all finite', mtime)) arr[:, :, :, 1] = np.nan diff --git a/nibabel/benchmarks/bench_load_save.py b/nibabel/benchmarks/bench_load_save.py index 46118df43e..d9c6461959 100644 --- a/nibabel/benchmarks/bench_load_save.py +++ b/nibabel/benchmarks/bench_load_save.py @@ -1,4 +1,4 @@ -""" Benchmarks for load and save of image arrays +"""Benchmarks for load 
and save of image arrays Run benchmarks with:: @@ -34,7 +34,7 @@ def bench_load_save(): hdr = img.header sys.stdout.flush() print() - print_git_title("Image load save") + print_git_title('Image load save') hdr.set_data_dtype(np.float32) mtime = measure('sio.truncate(0); img.to_file_map()', repeat) print('%30s %6.2f' % ('Save float64 to float32', mtime)) diff --git a/nibabel/benchmarks/butils.py b/nibabel/benchmarks/butils.py index 4cc521ab66..01d6931eba 100644 --- a/nibabel/benchmarks/butils.py +++ b/nibabel/benchmarks/butils.py @@ -1,12 +1,11 @@ -""" Benchmarking utilities +"""Benchmarking utilities """ from .. import get_info def print_git_title(title): - """ Prints title string with git hash if possible, and underline - """ + """Prints title string with git hash if possible, and underline""" title = f"{title} for git revision {get_info()['commit_hash']}" print(title) print('-' * len(title)) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 666ff11251..4a330893b3 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -36,23 +36,14 @@ from .arrayproxy import ArrayProxy from .fileslice import strided_scalar -from .spatialimages import ( - SpatialImage, - SpatialHeader, - HeaderDataError, - ImageDataError -) +from .spatialimages import SpatialImage, SpatialHeader, HeaderDataError, ImageDataError from .volumeutils import Recoder # used for doc-tests filepath = os.path.dirname(os.path.realpath(__file__)) datadir = os.path.realpath(os.path.join(filepath, 'tests/data')) -_attr_dic = { - 'string': str, - 'integer': int, - 'float': float -} +_attr_dic = {'string': str, 'integer': int, 'float': float} _endian_dict = { 'LSB_FIRST': '<', @@ -66,11 +57,10 @@ 5: 'D', } -space_codes = Recoder(( - (0, 'unknown', ''), - (1, 'scanner', 'ORIG'), - (3, 'talairach', 'TLRC'), - (4, 'mni', 'MNI')), fields=('code', 'label', 'space')) +space_codes = Recoder( + ((0, 'unknown', ''), (1, 'scanner', 'ORIG'), (3, 'talairach', 'TLRC'), (4, 'mni', 'MNI')), + fields=('code', 'label', 'space'), +) class AFNIImageError(ImageDataError): @@ -114,8 +104,9 @@ def _unpack_var(var): TEMPLATE_SPACE ORIG """ - err_msg = ('Please check HEAD file to ensure it is AFNI compliant. ' - f'Offending attribute:\n{var}') + err_msg = ( + 'Please check HEAD file to ensure it is AFNI compliant. ' f'Offending attribute:\n{var}' + ) atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) if len(atype) != 1: raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}') @@ -127,13 +118,15 @@ def _unpack_var(var): try: attr = [atype(f) for f in attr.split()] except ValueError: - raise AFNIHeaderError('Failed to read variable from HEAD file ' - f'due to improper type casting. {err_msg}') + raise AFNIHeaderError( + 'Failed to read variable from HEAD file ' + f'due to improper type casting. {err_msg}' + ) else: # AFNI string attributes will always start with open single quote and # end with a tilde (NUL). 
These attributes CANNOT contain tildes (so # stripping is safe), but can contain single quotes (so we replace) - attr = attr.replace('\'', '', 1).rstrip('~') + attr = attr.replace("'", '', 1).rstrip('~') return aname[0], attr[0] if len(attr) == 1 else attr @@ -165,12 +158,12 @@ def _get_datatype(info): bt = info['BRICK_TYPES'] if isinstance(bt, list): if np.unique(bt).size > 1: - raise AFNIImageError('Can\'t load file with multiple data types.') + raise AFNIImageError("Can't load file with multiple data types.") bt = bt[0] bo = _endian_dict.get(bo, '=') bt = _dtype_dict.get(bt, None) if bt is None: - raise AFNIImageError('Can\'t deduce image data type.') + raise AFNIImageError("Can't deduce image data type.") return np.dtype(bo + bt) @@ -208,7 +201,7 @@ def parse_AFNI_header(fobj): class AFNIArrayProxy(ArrayProxy): - """ Proxy object for AFNI image array. + """Proxy object for AFNI image array. Attributes ---------- @@ -244,10 +237,9 @@ def __init__(self, file_like, header, *, mmap=True, keep_file_open=None): effect. The default value (``None``) will result in the value of ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. """ - super(AFNIArrayProxy, self).__init__(file_like, - header, - mmap=mmap, - keep_file_open=keep_file_open) + super(AFNIArrayProxy, self).__init__( + file_like, header, mmap=mmap, keep_file_open=keep_file_open + ) self._scaling = header.get_data_scaling() @property @@ -299,9 +291,9 @@ def __init__(self, info): """ self.info = info dt = _get_datatype(self.info) - super(AFNIHeader, self).__init__(data_dtype=dt, - shape=self._calc_data_shape(), - zooms=self._calc_zooms()) + super(AFNIHeader, self).__init__( + data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms() + ) @classmethod def from_header(klass, header=None): @@ -337,7 +329,7 @@ def _calc_data_shape(self): j, k. """ dset_rank = self.info['DATASET_RANK'] - shape = tuple(self.info['DATASET_DIMENSIONS'][:dset_rank[0]]) + shape = tuple(self.info['DATASET_DIMENSIONS'][: dset_rank[0]]) n_vols = dset_rank[1] return shape + (n_vols,) @@ -362,7 +354,13 @@ def _calc_zooms(self): origin", and second giving "Time step (TR)". """ xyz_step = tuple(np.abs(self.info['DELTA'])) - t_step = self.info.get('TAXIS_FLOATS', (0, 0,)) + t_step = self.info.get( + 'TAXIS_FLOATS', + ( + 0, + 0, + ), + ) if len(t_step) > 0: t_step = (t_step[1],) return xyz_step + t_step @@ -402,8 +400,7 @@ def get_affine(self): # AFNI default is RAI- == LPS+ == DICOM order. 
We need to flip RA sign # to align with nibabel RAS+ system affine = np.asarray(self.info['IJK_TO_DICOM_REAL']).reshape(3, 4) - affine = np.row_stack((affine * [[-1], [-1], [1]], - [0, 0, 0, 1])) + affine = np.row_stack((affine * [[-1], [-1], [1]], [0, 0, 0, 1])) return affine def get_data_scaling(self): @@ -526,10 +523,8 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): hdr = klass.header_class.from_fileobj(hdr_fobj) imgf = file_map['image'].fileobj imgf = file_map['image'].filename if imgf is None else imgf - data = klass.ImageArrayProxy(imgf, hdr.copy(), mmap=mmap, - keep_file_open=keep_file_open) - return klass(data, hdr.get_affine(), header=hdr, extra=None, - file_map=file_map) + data = klass.ImageArrayProxy(imgf, hdr.copy(), mmap=mmap, keep_file_open=keep_file_open) + return klass(data, hdr.get_affine(), header=hdr, extra=None, file_map=file_map) @classmethod def filespec_to_file_map(klass, filespec): @@ -568,7 +563,7 @@ def filespec_to_file_map(klass, filespec): fname = fholder.filename if key == 'header' and not os.path.exists(fname): for ext in klass._compressed_suffixes: - fname = fname[:-len(ext)] if fname.endswith(ext) else fname + fname = fname[: -len(ext)] if fname.endswith(ext) else fname elif key == 'image' and not os.path.exists(fname): for ext in klass._compressed_suffixes: if os.path.exists(fname + ext): diff --git a/nibabel/caret.py b/nibabel/caret.py index 9f05585cb2..e142922f26 100644 --- a/nibabel/caret.py +++ b/nibabel/caret.py @@ -12,7 +12,7 @@ class CaretMetaData(xml.XmlSerializable, MutableMapping): - """ A list of name-value pairs used in various Caret-based XML formats + """A list of name-value pairs used in various Caret-based XML formats * Description - Provides a simple method for user-supplied metadata that associates names with values. 
@@ -44,18 +44,18 @@ class CaretMetaData(xml.XmlSerializable, MutableMapping): >>> md.to_xml() b'keyval' """ + def __init__(self, *args, **kwargs): args, kwargs = self._sanitize(args, kwargs) self._data = dict(*args, **kwargs) @staticmethod def _sanitize(args, kwargs): - """ Override in subclasses to accept and warn on previous invocations - """ + """Override in subclasses to accept and warn on previous invocations""" return args, kwargs def __getitem__(self, key): - """ Get metadata entry by name + """Get metadata entry by name >>> md = CaretMetaData({'key': 'val'}) >>> md['key'] @@ -64,7 +64,7 @@ def __getitem__(self, key): return self._data[key] def __setitem__(self, key, value): - """ Set metadata entry by name + """Set metadata entry by name >>> md = CaretMetaData({'key': 'val'}) >>> dict(md) @@ -79,7 +79,7 @@ def __setitem__(self, key, value): self._data[key] = value def __delitem__(self, key): - """ Delete metadata entry by name + """Delete metadata entry by name >>> md = CaretMetaData({'key': 'val'}) >>> dict(md) @@ -91,7 +91,7 @@ def __delitem__(self, key): del self._data[key] def __len__(self): - """ Get length of metadata list + """Get length of metadata list >>> md = CaretMetaData({'key': 'val'}) >>> len(md) @@ -100,7 +100,7 @@ def __len__(self): return len(self._data) def __iter__(self): - """ Iterate over metadata entries + """Iterate over metadata entries >>> md = CaretMetaData({'key': 'val'}) >>> for key in md: @@ -110,7 +110,7 @@ def __iter__(self): return iter(self._data) def __repr__(self): - return f"<{self.__class__.__name__} {self._data!r}>" + return f'<{self.__class__.__name__} {self._data!r}>' def _to_xml_element(self): metadata = xml.Element('MetaData') diff --git a/nibabel/casting.py b/nibabel/casting.py index 45c2c5bd36..c2bceeaf0f 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -1,4 +1,4 @@ -""" Utilities for casting numpy values in various ways +"""Utilities for casting numpy values in various ways Most routines work round some numpy oddities in floating point precision and casting. Others work round numpy casting to and from python ints @@ -24,7 +24,7 @@ class CastingError(Exception): def float_to_int(arr, int_type, nan2zero=True, infmax=False): - """ Convert floating point array `arr` to type `int_type` + """Convert floating point array `arr` to type `int_type` * Rounds numbers to nearest integer * Clips values to prevent overflows when casting @@ -114,7 +114,7 @@ def float_to_int(arr, int_type, nan2zero=True, infmax=False): def shared_range(flt_type, int_type): - """ Min and max in float type that are >=min, <=max in integer type + """Min and max in float type that are >=min, <=max in integer type This is not as easy as it sounds, because the float type may not be able to exactly represent the max or min integer values, so we have to find the @@ -172,12 +172,13 @@ def shared_range(flt_type, int_type): # types. # ---------------------------------------------------------------------------- + class FloatingError(Exception): pass def on_powerpc(): - """ True if we are running on a Power PC platform + """True if we are running on a Power PC platform Has to deal with older Macs and IBM POWER7 series among others """ @@ -185,7 +186,7 @@ def on_powerpc(): def type_info(np_type): - """ Return dict with min, max, nexp, nmant, width for numpy type `np_type` + """Return dict with min, max, nexp, nmant, width for numpy type `np_type` Type can be integer in which case nexp and nmant are None. 
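[Editor's note on the `shared_range` hunk above: its docstring hints at the core subtlety — the maximum of a wide integer type may not be exactly representable in the float type, and naively casting the limit rounds *up*, past the integer range. A quick demonstration of the idea (standard numpy behaviour, not the function's exact implementation):

    import numpy as np

    imax = np.iinfo(np.int64).max       # 9223372036854775807 == 2**63 - 1
    fmax = np.float64(imax)             # nearest float64 is 2**63: rounds UP
    print(int(fmax) > imax)             # True: casting the limit overshot it
    # A safe clipping threshold is therefore the next representable
    # float64 *below* 2**63:
    safe = np.nextafter(fmax, 0)
    print(int(safe) <= imax)            # True: 9223372036854774784

This is why `shared_range` has to search for the largest float that is still within the integer type, rather than just casting the integer extremes.]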
@@ -225,20 +226,28 @@ def type_info(np_type): except ValueError: pass else: - return dict(min=np_type(info.min), max=np_type(info.max), minexp=None, - maxexp=None, nmant=None, nexp=None, width=width) + return dict( + min=np_type(info.min), + max=np_type(info.max), + minexp=None, + maxexp=None, + nmant=None, + nexp=None, + width=width, + ) info = np.finfo(dt) # Trust the standard IEEE types nmant, nexp = info.nmant, info.nexp - ret = dict(min=np_type(info.min), - max=np_type(info.max), - nmant=nmant, - nexp=nexp, - minexp=info.minexp, - maxexp=info.maxexp, - width=width) - if np_type in (np.float16, np.float32, np.float64, - np.complex64, np.complex128): + ret = dict( + min=np_type(info.min), + max=np_type(info.max), + nmant=nmant, + nexp=nexp, + minexp=info.minexp, + maxexp=info.maxexp, + width=width, + ) + if np_type in (np.float16, np.float32, np.float64, np.complex64, np.complex128): return ret info_64 = np.finfo(np.float64) if dt.kind == 'c': @@ -247,16 +256,18 @@ def type_info(np_type): else: assert np_type is np.longdouble vals = (nmant, nexp, width) - if vals in ((112, 15, 16), # binary128 - (info_64.nmant, info_64.nexp, 8), # float64 - (63, 15, 12), (63, 15, 16)): # Intel extended 80 + if vals in ( + (112, 15, 16), # binary128 + (info_64.nmant, info_64.nexp, 8), # float64 + (63, 15, 12), + (63, 15, 16), + ): # Intel extended 80 return ret # these are OK without modification # The remaining types are longdoubles with bad finfo values. Some we # correct, others we wait to hear of errors. # We start with float64 as basis ret = type_info(np.float64) - if vals in ((52, 15, 12), # windows float96 - (52, 15, 16)): # windows float128? + if vals in ((52, 15, 12), (52, 15, 16)): # windows float96 # windows float128? # On windows 32 bit at least, float96 is Intel 80 storage but operating # at float64 precision. The finfo values give nexp == 15 (as for intel # 80) but in calculations nexp in fact appears to be 11 as for float64 @@ -270,39 +281,32 @@ def type_info(np_type): # their complex equivalent. if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32): raise FloatingError(f'We had not expected type {np_type}') - if (vals == (1, 1, 16) and on_powerpc() and - _check_maxexp(np.longdouble, 1024)): + if vals == (1, 1, 16) and on_powerpc() and _check_maxexp(np.longdouble, 1024): # double pair on PPC. The _check_nmant routine does not work for this # type, hence the powerpc platform check instead ret.update(dict(nmant=106, width=width)) - elif (_check_nmant(np.longdouble, 52) and - _check_maxexp(np.longdouble, 11)): + elif _check_nmant(np.longdouble, 52) and _check_maxexp(np.longdouble, 11): # Got float64 despite everything pass - elif (_check_nmant(np.longdouble, 112) and - _check_maxexp(np.longdouble, 16384)): + elif _check_nmant(np.longdouble, 112) and _check_maxexp(np.longdouble, 16384): # binary 128, but with some busted type information. 
np.longcomplex # seems to break here too, so we need to use np.longdouble and # complexify two = np.longdouble(2) # See: https://matthew-brett.github.io/pydagogue/floating_point.html - max_val = (two ** 113 - 1) / (two ** 112) * two ** 16383 + max_val = (two**113 - 1) / (two**112) * two**16383 if np_type is np.longcomplex: max_val += 0j - ret = dict(min=-max_val, - max=max_val, - nmant=112, - nexp=15, - minexp=-16382, - maxexp=16384, - width=width) + ret = dict( + min=-max_val, max=max_val, nmant=112, nexp=15, minexp=-16382, maxexp=16384, width=width + ) else: # don't recognize the type raise FloatingError(f'We had not expected long double type {np_type} with info {info}') return ret def _check_nmant(np_type, nmant): - """ True if fp type `np_type` seems to have `nmant` significand digits + """True if fp type `np_type` seems to have `nmant` significand digits Note 'digits' does not include implicit digits. And in fact if there are no implicit digits, the `nmant` number is one less than the actual digits. @@ -328,7 +332,7 @@ def _check_nmant(np_type, nmant): def _check_maxexp(np_type, maxexp): - """ True if fp type `np_type` seems to have `maxexp` maximum exponent + """True if fp type `np_type` seems to have `maxexp` maximum exponent We're testing "maxexp" as returned by numpy. This value is set to one greater than the maximum power of 2 that `np_type` can represent. @@ -351,12 +355,12 @@ def _check_maxexp(np_type, maxexp): np_type = dt.type two = np_type(2).reshape((1,)) # to avoid upcasting with warnings.catch_warnings(): - warnings.simplefilter("ignore", RuntimeWarning) # Expected overflow warning - return np.isfinite(two ** (maxexp - 1)) and not np.isfinite(two ** maxexp) + warnings.simplefilter('ignore', RuntimeWarning) # Expected overflow warning + return np.isfinite(two ** (maxexp - 1)) and not np.isfinite(two**maxexp) def as_int(x, check=True): - """ Return python integer representation of number + """Return python integer representation of number This is useful because the numpy int(val) mechanism is broken for large values in np.longdouble. @@ -417,7 +421,7 @@ def as_int(x, check=True): def int_to_float(val, flt_type): - """ Convert integer `val` to floating point type `flt_type` + """Convert integer `val` to floating point type `flt_type` Why is this so complicated? 
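[Editor's note on `_check_maxexp` above: it probes the exponent range empirically rather than trusting `finfo` — the largest power of two must be finite one step below `maxexp` and overflow at `maxexp` itself. The same probe, spelled out for float64 (where `maxexp` is 1024):

    import warnings
    import numpy as np

    two = np.float64(2).reshape((1,))   # array form avoids scalar upcasting
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)  # expected overflow
        print(np.isfinite(two ** 1023))  # [ True]  largest finite power of 2
        print(np.isfinite(two ** 1024))  # [False]  overflows to inf
]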
@@ -454,7 +458,7 @@ def int_to_float(val, flt_type): def floor_exact(val, flt_type): - """ Return nearest exact integer <= `val` in float type `flt_type` + """Return nearest exact integer <= `val` in float type `flt_type` Parameters ---------- @@ -508,14 +512,14 @@ def floor_exact(val, flt_type): if diff >= 0: # floating point value <= val return fval # Float casting made the value go up - biggest_gap = 2**(floor_log2(val) - info['nmant']) + biggest_gap = 2 ** (floor_log2(val) - info['nmant']) assert biggest_gap > 1 fval -= flt_type(biggest_gap) return fval def ceil_exact(val, flt_type): - """ Return nearest exact integer >= `val` in float type `flt_type` + """Return nearest exact integer >= `val` in float type `flt_type` Parameters ---------- @@ -559,7 +563,7 @@ def ceil_exact(val, flt_type): def int_abs(arr): - """ Absolute values of array taking care of max negative int values + """Absolute values of array taking care of max negative int values Parameters ---------- @@ -599,7 +603,7 @@ def int_abs(arr): def floor_log2(x): - """ floor of log2 of abs(`x`) + """floor of log2 of abs(`x`) Embarrassingly, from https://en.wikipedia.org/wiki/Binary_logarithm @@ -639,7 +643,7 @@ def floor_log2(x): def best_float(): - """ Floating point type with best precision + """Floating point type with best precision This is nearly always np.longdouble, except on Windows, where np.longdouble is Intel80 storage, but with float64 precision for calculations. In that @@ -662,15 +666,15 @@ def best_float(): long_info = type_info(np.longdouble) except FloatingError: return np.float64 - if (long_info['nmant'] > type_info(np.float64)['nmant'] and - machine() != 'sparc64'): # sparc has crazy-slow float128 + if ( + long_info['nmant'] > type_info(np.float64)['nmant'] and machine() != 'sparc64' + ): # sparc has crazy-slow float128 return np.longdouble return np.float64 def longdouble_lte_float64(): - """ Return True if longdouble appears to have the same precision as float64 - """ + """Return True if longdouble appears to have the same precision as float64""" return np.longdouble(2**53) == np.longdouble(2**53) + 1 @@ -679,7 +683,7 @@ def longdouble_lte_float64(): def longdouble_precision_improved(): - """ True if longdouble precision increased since initial import + """True if longdouble precision increased since initial import This can happen on Windows compiled with MSVC. It may be because libraries compiled with mingw (longdouble is Intel80) get linked to numpy compiled @@ -689,8 +693,7 @@ def longdouble_precision_improved(): def have_binary128(): - """ True if we have a binary128 IEEE longdouble - """ + """True if we have a binary128 IEEE longdouble""" try: ti = type_info(np.longdouble) except FloatingError: @@ -699,7 +702,7 @@ def have_binary128(): def ok_floats(): - """ Return floating point types sorted by precision + """Return floating point types sorted by precision Remove longdouble if it has no higher precision than float64 """ @@ -714,7 +717,7 @@ def ok_floats(): def able_int_type(values): - """ Find the smallest integer numpy type to contain sequence `values` + """Find the smallest integer numpy type to contain sequence `values` Prefers uint to int if minimum is >= 0 @@ -751,7 +754,7 @@ def able_int_type(values): def ulp(val=np.float64(1.0)): - """ Return gap between `val` and nearest representable number of same type + """Return gap between `val` and nearest representable number of same type This is the value of a unit in the last place (ULP), and is similar in meaning to the MATLAB eps function. 
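[Editor's note on the `ulp` docstring above: for the default argument, the value it documents (the gap to the nearest representable neighbour, like MATLAB's ``eps``) can be cross-checked against numpy's own spacing function. A small sanity check, assuming a standard IEEE float64:

    import numpy as np

    # One unit in the last place at 1.0: the exponent of 1.0 is 0 and
    # float64 has 52 explicit mantissa bits, so the gap is 2**-52.
    print(2.0 ** -52 == np.spacing(np.float64(1.0)))   # True
]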
@@ -785,4 +788,4 @@ def ulp(val=np.float64(1.0)): if fl2 is None or fl2 < info['minexp']: # subnormal fl2 = info['minexp'] # 'nmant' value does not include implicit first bit - return 2**(fl2 - info['nmant']) + return 2 ** (fl2 - info['nmant']) diff --git a/nibabel/cifti2/__init__.py b/nibabel/cifti2/__init__.py index c0933c9041..e7c999b6cd 100644 --- a/nibabel/cifti2/__init__.py +++ b/nibabel/cifti2/__init__.py @@ -18,12 +18,27 @@ """ from .parse_cifti2 import Cifti2Extension -from .cifti2 import (Cifti2MetaData, Cifti2Header, Cifti2Image, Cifti2Label, - Cifti2LabelTable, Cifti2VertexIndices, - Cifti2VoxelIndicesIJK, Cifti2BrainModel, Cifti2Matrix, - Cifti2MatrixIndicesMap, Cifti2NamedMap, Cifti2Parcel, - Cifti2Surface, - Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, - Cifti2Vertices, Cifti2Volume, CIFTI_BRAIN_STRUCTURES, - Cifti2HeaderError, CIFTI_MODEL_TYPES, load, save) -from .cifti2_axes import (Axis, BrainModelAxis, ParcelsAxis, SeriesAxis, LabelAxis, ScalarAxis) +from .cifti2 import ( + Cifti2MetaData, + Cifti2Header, + Cifti2Image, + Cifti2Label, + Cifti2LabelTable, + Cifti2VertexIndices, + Cifti2VoxelIndicesIJK, + Cifti2BrainModel, + Cifti2Matrix, + Cifti2MatrixIndicesMap, + Cifti2NamedMap, + Cifti2Parcel, + Cifti2Surface, + Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + Cifti2Vertices, + Cifti2Volume, + CIFTI_BRAIN_STRUCTURES, + Cifti2HeaderError, + CIFTI_MODEL_TYPES, + load, + save, +) +from .cifti2_axes import Axis, BrainModelAxis, ParcelsAxis, SeriesAxis, LabelAxis, ScalarAxis diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 31d631bb5f..4b6fd3df25 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to CIFTI-2 image format +"""Read / write access to CIFTI-2 image format Format of the NIFTI2 container format described here: @@ -41,74 +41,74 @@ def _float_01(val): class Cifti2HeaderError(Exception): - """ Error in CIFTI-2 header - """ + """Error in CIFTI-2 header""" _dtdefs = ( # code, label, dtype definition, niistring - (2, 'uint8', np.uint8, "NIFTI_TYPE_UINT8"), - (4, 'int16', np.int16, "NIFTI_TYPE_INT16"), - (8, 'int32', np.int32, "NIFTI_TYPE_INT32"), - (16, 'float32', np.float32, "NIFTI_TYPE_FLOAT32"), - (64, 'float64', np.float64, "NIFTI_TYPE_FLOAT64"), - (256, 'int8', np.int8, "NIFTI_TYPE_INT8"), - (512, 'uint16', np.uint16, "NIFTI_TYPE_UINT16"), - (768, 'uint32', np.uint32, "NIFTI_TYPE_UINT32"), - (1024, 'int64', np.int64, "NIFTI_TYPE_INT64"), - (1280, 'uint64', np.uint64, "NIFTI_TYPE_UINT64"), + (2, 'uint8', np.uint8, 'NIFTI_TYPE_UINT8'), + (4, 'int16', np.int16, 'NIFTI_TYPE_INT16'), + (8, 'int32', np.int32, 'NIFTI_TYPE_INT32'), + (16, 'float32', np.float32, 'NIFTI_TYPE_FLOAT32'), + (64, 'float64', np.float64, 'NIFTI_TYPE_FLOAT64'), + (256, 'int8', np.int8, 'NIFTI_TYPE_INT8'), + (512, 'uint16', np.uint16, 'NIFTI_TYPE_UINT16'), + (768, 'uint32', np.uint32, 'NIFTI_TYPE_UINT32'), + (1024, 'int64', np.int64, 'NIFTI_TYPE_INT64'), + (1280, 'uint64', np.uint64, 'NIFTI_TYPE_UINT64'), ) # Make full code alias bank, including dtype column data_type_codes = make_dt_codes(_dtdefs) -CIFTI_MAP_TYPES = ('CIFTI_INDEX_TYPE_BRAIN_MODELS', - 'CIFTI_INDEX_TYPE_PARCELS', - 'CIFTI_INDEX_TYPE_SERIES', - 'CIFTI_INDEX_TYPE_SCALARS', - 'CIFTI_INDEX_TYPE_LABELS') +CIFTI_MAP_TYPES = ( + 'CIFTI_INDEX_TYPE_BRAIN_MODELS', + 'CIFTI_INDEX_TYPE_PARCELS', + 'CIFTI_INDEX_TYPE_SERIES', + 
'CIFTI_INDEX_TYPE_SCALARS', + 'CIFTI_INDEX_TYPE_LABELS', +) CIFTI_MODEL_TYPES = ( 'CIFTI_MODEL_TYPE_SURFACE', # Modeled using surface vertices - 'CIFTI_MODEL_TYPE_VOXELS' # Modeled using voxels. + 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. ) -CIFTI_SERIESUNIT_TYPES = ('SECOND', - 'HERTZ', - 'METER', - 'RADIAN') - -CIFTI_BRAIN_STRUCTURES = ('CIFTI_STRUCTURE_ACCUMBENS_LEFT', - 'CIFTI_STRUCTURE_ACCUMBENS_RIGHT', - 'CIFTI_STRUCTURE_ALL_WHITE_MATTER', - 'CIFTI_STRUCTURE_ALL_GREY_MATTER', - 'CIFTI_STRUCTURE_AMYGDALA_LEFT', - 'CIFTI_STRUCTURE_AMYGDALA_RIGHT', - 'CIFTI_STRUCTURE_BRAIN_STEM', - 'CIFTI_STRUCTURE_CAUDATE_LEFT', - 'CIFTI_STRUCTURE_CAUDATE_RIGHT', - 'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_LEFT', - 'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_RIGHT', - 'CIFTI_STRUCTURE_CEREBELLUM', - 'CIFTI_STRUCTURE_CEREBELLUM_LEFT', - 'CIFTI_STRUCTURE_CEREBELLUM_RIGHT', - 'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_LEFT', - 'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_RIGHT', - 'CIFTI_STRUCTURE_CORTEX', - 'CIFTI_STRUCTURE_CORTEX_LEFT', - 'CIFTI_STRUCTURE_CORTEX_RIGHT', - 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT', - 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT', - 'CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT', - 'CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT', - 'CIFTI_STRUCTURE_OTHER', - 'CIFTI_STRUCTURE_OTHER_GREY_MATTER', - 'CIFTI_STRUCTURE_OTHER_WHITE_MATTER', - 'CIFTI_STRUCTURE_PALLIDUM_LEFT', - 'CIFTI_STRUCTURE_PALLIDUM_RIGHT', - 'CIFTI_STRUCTURE_PUTAMEN_LEFT', - 'CIFTI_STRUCTURE_PUTAMEN_RIGHT', - 'CIFTI_STRUCTURE_THALAMUS_LEFT', - 'CIFTI_STRUCTURE_THALAMUS_RIGHT') +CIFTI_SERIESUNIT_TYPES = ('SECOND', 'HERTZ', 'METER', 'RADIAN') + +CIFTI_BRAIN_STRUCTURES = ( + 'CIFTI_STRUCTURE_ACCUMBENS_LEFT', + 'CIFTI_STRUCTURE_ACCUMBENS_RIGHT', + 'CIFTI_STRUCTURE_ALL_WHITE_MATTER', + 'CIFTI_STRUCTURE_ALL_GREY_MATTER', + 'CIFTI_STRUCTURE_AMYGDALA_LEFT', + 'CIFTI_STRUCTURE_AMYGDALA_RIGHT', + 'CIFTI_STRUCTURE_BRAIN_STEM', + 'CIFTI_STRUCTURE_CAUDATE_LEFT', + 'CIFTI_STRUCTURE_CAUDATE_RIGHT', + 'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_LEFT', + 'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_RIGHT', + 'CIFTI_STRUCTURE_CEREBELLUM', + 'CIFTI_STRUCTURE_CEREBELLUM_LEFT', + 'CIFTI_STRUCTURE_CEREBELLUM_RIGHT', + 'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_LEFT', + 'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_RIGHT', + 'CIFTI_STRUCTURE_CORTEX', + 'CIFTI_STRUCTURE_CORTEX_LEFT', + 'CIFTI_STRUCTURE_CORTEX_RIGHT', + 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT', + 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT', + 'CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT', + 'CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT', + 'CIFTI_STRUCTURE_OTHER', + 'CIFTI_STRUCTURE_OTHER_GREY_MATTER', + 'CIFTI_STRUCTURE_OTHER_WHITE_MATTER', + 'CIFTI_STRUCTURE_PALLIDUM_LEFT', + 'CIFTI_STRUCTURE_PALLIDUM_RIGHT', + 'CIFTI_STRUCTURE_PUTAMEN_LEFT', + 'CIFTI_STRUCTURE_PUTAMEN_RIGHT', + 'CIFTI_STRUCTURE_THALAMUS_LEFT', + 'CIFTI_STRUCTURE_THALAMUS_RIGHT', +) def _value_if_klass(val, klass): @@ -118,7 +118,7 @@ def _value_if_klass(val, klass): def _underscore(string): - """ Convert a string from CamelCase to underscored """ + """Convert a string from CamelCase to underscored""" string = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', string) return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', string).lower() @@ -128,7 +128,7 @@ class LimitedNifti2Header(Nifti2Header): class Cifti2MetaData(CaretMetaData): - """ A list of name-value pairs + """A list of name-value pairs * Description - Provides a simple method for user-supplied metadata that associates names with values. 
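[Editor's note on the `_underscore` helper reformatted above: it drives the XML attribute lookups later in the file (e.g. mapping the CIFTI-2 attribute 'IndicesMapToDataType' to the Python attribute 'indices_map_to_data_type'). The same two substitutions in isolation:

    import re

    def underscore(string):
        # Split an acronym run from a following capitalized word first...
        string = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', string)
        # ...then split lower/digit-to-upper boundaries and lowercase.
        return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', string).lower()

    print(underscore('IndicesMapToDataType'))     # indices_map_to_data_type
    print(underscore('SurfaceNumberOfVertices'))  # surface_number_of_vertices
]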
@@ -146,9 +146,10 @@ class Cifti2MetaData(CaretMetaData): ---------- data : list of (name, value) tuples """ + @staticmethod def _sanitize(args, kwargs): - """ Sanitize and warn on deprecated arguments + """Sanitize and warn on deprecated arguments Accept metadata positional/keyword argument that can take ``None`` to indicate no initialization. @@ -175,20 +176,26 @@ def _sanitize(args, kwargs): >>> Cifti2MetaData(metadata='val') """ - if not args and list(kwargs) == ["metadata"]: - if not isinstance(kwargs["metadata"], str): - warn("Cifti2MetaData now has a dict-like interface and will " - "no longer accept the ``metadata`` keyword argument in " - "NiBabel 6.0. See ``pydoc dict`` for initialization options.", - FutureWarning, stacklevel=3) - md = kwargs.pop("metadata") + if not args and list(kwargs) == ['metadata']: + if not isinstance(kwargs['metadata'], str): + warn( + 'Cifti2MetaData now has a dict-like interface and will ' + 'no longer accept the ``metadata`` keyword argument in ' + 'NiBabel 6.0. See ``pydoc dict`` for initialization options.', + FutureWarning, + stacklevel=3, + ) + md = kwargs.pop('metadata') if md is not None: args = (md,) if args == (None,): - warn("Cifti2MetaData now has a dict-like interface and will no longer " - "accept the positional argument ``None`` in NiBabel 6.0. " - "See ``pydoc dict`` for initialization options.", - FutureWarning, stacklevel=3) + warn( + 'Cifti2MetaData now has a dict-like interface and will no longer ' + 'accept the positional argument ``None`` in NiBabel 6.0. ' + 'See ``pydoc dict`` for initialization options.', + FutureWarning, + stacklevel=3, + ) args = () return args, kwargs @@ -216,7 +223,7 @@ def difference_update(self, metadata): class Cifti2LabelTable(xml.XmlSerializable, MutableMapping): - r""" CIFTI-2 label table: a sequence of ``Cifti2Label``\s + r"""CIFTI-2 label table: a sequence of ``Cifti2Label``\s * Description - Used by NamedMap when IndicesMapToDataType is "CIFTI_INDEX_TYPE_LABELS" in order to associate names and display colors @@ -255,8 +262,10 @@ def __setitem__(self, key, value): try: self._labels[key] = Cifti2Label(*([key] + list(value))) except ValueError: - raise ValueError('Key should be int, value should be sequence ' - 'of str and 4 floats between 0 and 1') + raise ValueError( + 'Key should be int, value should be sequence ' + 'of str and 4 floats between 0 and 1' + ) def __delitem__(self, key): del self._labels[key] @@ -274,7 +283,7 @@ def _to_xml_element(self): class Cifti2Label(xml.XmlSerializable): - """ CIFTI-2 label: association of integer key with a name and RGBA values + """CIFTI-2 label: association of integer key with a name and RGBA values For all color components, value is floating point with range 0.0 to 1.0. @@ -311,7 +320,8 @@ class Cifti2Label(xml.XmlSerializable): alpha : float, optional Alpha color component for label (between 0 and 1). 
""" - def __init__(self, key=0, label='', red=0., green=0., blue=0., alpha=0.): + + def __init__(self, key=0, label='', red=0.0, green=0.0, blue=0.0, alpha=0.0): self.key = int(key) self.label = str(label) self.red = _float_01(red) @@ -321,7 +331,7 @@ def __init__(self, key=0, label='', red=0., green=0., blue=0., alpha=0.): @property def rgba(self): - """ Returns RGBA as tuple """ + """Returns RGBA as tuple""" return (self.red, self.green, self.blue, self.alpha) def _to_xml_element(self): @@ -377,6 +387,7 @@ class Cifti2NamedMap(xml.XmlSerializable): label_table : None or Cifti2LabelTable Label table associated with named map """ + def __init__(self, map_name=None, metadata=None, label_table=None): self.map_name = map_name self.metadata = metadata @@ -388,7 +399,7 @@ def metadata(self): @metadata.setter def metadata(self, metadata): - """ Set the metadata for this NamedMap + """Set the metadata for this NamedMap Parameters ---------- @@ -406,7 +417,7 @@ def label_table(self): @label_table.setter def label_table(self, label_table): - """ Set the label_table for this NamedMap + """Set the label_table for this NamedMap Parameters ---------- @@ -455,6 +466,7 @@ class Cifti2Surface(xml.XmlSerializable): surface_number_of_vertices : int Number of vertices on surface """ + def __init__(self, brain_structure=None, surface_number_of_vertices=None): self.brain_structure = brain_structure self.surface_number_of_vertices = surface_number_of_vertices @@ -486,6 +498,7 @@ class Cifti2VoxelIndicesIJK(xml.XmlSerializable, MutableSequence): Each element of this sequence is a triple of integers. """ + def __init__(self, indices=None): self._indices = [] if indices is not None: @@ -545,8 +558,7 @@ def _to_xml_element(self): raise Cifti2HeaderError('VoxelIndicesIJK element require an index table') vox_ind = xml.Element('VoxelIndicesIJK') - vox_ind.text = '\n'.join(' '.join([str(v) for v in row]) - for row in self._indices) + vox_ind.text = '\n'.join(' '.join([str(v) for v in row]) for row in self._indices) return vox_ind @@ -575,6 +587,7 @@ class Cifti2Vertices(xml.XmlSerializable, MutableSequence): A string from the BrainStructure list to identify what surface this vertex list is from (usually left cortex, right cortex, or cerebellum). 
""" + def __init__(self, brain_structure=None, vertices=None): self._vertices = [] if vertices is not None: @@ -642,14 +655,14 @@ class Cifti2Parcel(xml.XmlSerializable): vertices : list of Cifti2Vertices Vertices associated with parcel """ + def __init__(self, name=None, voxel_indices_ijk=None, vertices=None): self.name = name self._voxel_indices_ijk = voxel_indices_ijk self.vertices = vertices if vertices is not None else [] for val in self.vertices: if not isinstance(val, Cifti2Vertices): - raise ValueError('Cifti2Parcel vertices must be instances of ' - 'Cifti2Vertices') + raise ValueError('Cifti2Parcel vertices must be instances of ' 'Cifti2Vertices') @property def voxel_indices_ijk(self): @@ -660,18 +673,18 @@ def voxel_indices_ijk(self, value): self._voxel_indices_ijk = _value_if_klass(value, Cifti2VoxelIndicesIJK) def append_cifti_vertices(self, vertices): - """ Appends a Cifti2Vertices element to the Cifti2Parcel + """Appends a Cifti2Vertices element to the Cifti2Parcel Parameters ---------- vertices : Cifti2Vertices """ if not isinstance(vertices, Cifti2Vertices): - raise TypeError("Not a valid Cifti2Vertices instance") + raise TypeError('Not a valid Cifti2Vertices instance') self.vertices.append(vertices) def pop_cifti2_vertices(self, ith): - """ Pops the ith vertices element from the Cifti2Parcel """ + """Pops the ith vertices element from the Cifti2Parcel""" self.vertices.pop(ith) def _to_xml_element(self): @@ -712,6 +725,7 @@ class Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(xml.XmlSerializable): matrix : array-like shape (4, 4) Affine transformation matrix from voxel indices to RAS space. """ + # meterExponent = int # matrix = np.array @@ -726,8 +740,7 @@ def _to_xml_element(self): ) trans = xml.Element('TransformationMatrixVoxelIndicesIJKtoXYZ') trans.attrib['MeterExponent'] = str(self.meter_exponent) - trans.text = '\n'.join(' '.join(map('{:.10f}'.format, row)) - for row in self.matrix) + trans.text = '\n'.join(' '.join(map('{:.10f}'.format, row)) for row in self.matrix) return trans @@ -759,6 +772,7 @@ class Cifti2Volume(xml.XmlSerializable): : Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ Matrix that translates voxel indices to spatial coordinates """ + def __init__(self, volume_dimensions=None, transform_matrix=None): self.volume_dimensions = volume_dimensions self.transformation_matrix_voxel_indices_ijk_to_xyz = transform_matrix @@ -768,8 +782,7 @@ def _to_xml_element(self): raise Cifti2HeaderError('Volume element requires dimensions') volume = xml.Element('Volume') - volume.attrib['VolumeDimensions'] = ','.join( - [str(val) for val in self.volume_dimensions]) + volume.attrib['VolumeDimensions'] = ','.join([str(val) for val in self.volume_dimensions]) volume.append(self.transformation_matrix_voxel_indices_ijk_to_xyz._to_xml_element()) return volume @@ -792,6 +805,7 @@ class Cifti2VertexIndices(xml.XmlSerializable, MutableSequence): content. * Parent Element - BrainModel """ + def __init__(self, indices=None): self._indices = [] if indices is not None: @@ -830,7 +844,7 @@ def _to_xml_element(self): class Cifti2BrainModel(xml.XmlSerializable): - """ Element representing a mapping of the dimension to vertex or voxels. + """Element representing a mapping of the dimension to vertex or voxels. Mapping to vertices of voxels must be specified. 
@@ -886,9 +900,16 @@ class Cifti2BrainModel(xml.XmlSerializable): Indices of the vertices towards where the array indices are mapped """ - def __init__(self, index_offset=None, index_count=None, model_type=None, - brain_structure=None, n_surface_vertices=None, - voxel_indices_ijk=None, vertex_indices=None): + def __init__( + self, + index_offset=None, + index_count=None, + model_type=None, + brain_structure=None, + n_surface_vertices=None, + voxel_indices_ijk=None, + vertex_indices=None, + ): self.index_offset = index_offset self.index_count = index_count self.model_type = model_type @@ -917,8 +938,13 @@ def vertex_indices(self, value): def _to_xml_element(self): brain_model = xml.Element('BrainModel') - for key in ['IndexOffset', 'IndexCount', 'ModelType', 'BrainStructure', - 'SurfaceNumberOfVertices']: + for key in [ + 'IndexOffset', + 'IndexCount', + 'ModelType', + 'BrainStructure', + 'SurfaceNumberOfVertices', + ]: attr = _underscore(key) value = getattr(self, attr) if value is not None: @@ -987,23 +1013,26 @@ class Cifti2MatrixIndicesMap(xml.XmlSerializable, MutableSequence): series_unit : str, optional If it is a series, units """ + _valid_type_mappings_ = { Cifti2BrainModel: ('CIFTI_INDEX_TYPE_BRAIN_MODELS',), Cifti2Parcel: ('CIFTI_INDEX_TYPE_PARCELS',), Cifti2NamedMap: ('CIFTI_INDEX_TYPE_LABELS',), Cifti2Volume: ('CIFTI_INDEX_TYPE_SCALARS', 'CIFTI_INDEX_TYPE_SERIES'), - Cifti2Surface: ('CIFTI_INDEX_TYPE_SCALARS', 'CIFTI_INDEX_TYPE_SERIES') + Cifti2Surface: ('CIFTI_INDEX_TYPE_SCALARS', 'CIFTI_INDEX_TYPE_SERIES'), } - def __init__(self, applies_to_matrix_dimension, - indices_map_to_data_type, - number_of_series_points=None, - series_exponent=None, - series_start=None, - series_step=None, - series_unit=None, - maps=[], - ): + def __init__( + self, + applies_to_matrix_dimension, + indices_map_to_data_type, + number_of_series_points=None, + series_exponent=None, + series_start=None, + series_step=None, + series_unit=None, + maps=[], + ): self.applies_to_matrix_dimension = applies_to_matrix_dimension self.indices_map_to_data_type = indices_map_to_data_type self.number_of_series_points = number_of_series_points @@ -1025,22 +1054,15 @@ def __getitem__(self, index): return self._maps[index] def __setitem__(self, index, value): - if ( - isinstance(value, Cifti2Volume) and - ( - self.volume is not None and - not isinstance(self._maps[index], Cifti2Volume) - ) + if isinstance(value, Cifti2Volume) and ( + self.volume is not None and not isinstance(self._maps[index], Cifti2Volume) ): - raise Cifti2HeaderError("Only one Volume can be in a MatrixIndicesMap") + raise Cifti2HeaderError('Only one Volume can be in a MatrixIndicesMap') self._maps[index] = value def insert(self, index, value): - if ( - isinstance(value, Cifti2Volume) and - self.volume is not None - ): - raise Cifti2HeaderError("Only one Volume can be in a MatrixIndicesMap") + if isinstance(value, Cifti2Volume) and self.volume is not None: + raise Cifti2HeaderError('Only one Volume can be in a MatrixIndicesMap') self._maps.insert(index, value) @@ -1072,7 +1094,7 @@ def volume(self): @volume.setter def volume(self, volume): if not isinstance(volume, Cifti2Volume): - raise ValueError("You can only set a volume with a volume") + raise ValueError('You can only set a volume with a volume') for i, v in enumerate(self): if isinstance(v, Cifti2Volume): break @@ -1087,7 +1109,7 @@ def volume(self): if isinstance(v, Cifti2Volume): break else: - raise ValueError("No Cifti2Volume element") + raise ValueError('No Cifti2Volume element') del self[i] 
@property @@ -1105,8 +1127,14 @@ def _to_xml_element(self): mat_ind_map = xml.Element('MatrixIndicesMap') dims_as_strings = [str(dim) for dim in self.applies_to_matrix_dimension] mat_ind_map.attrib['AppliesToMatrixDimension'] = ','.join(dims_as_strings) - for key in ['IndicesMapToDataType', 'NumberOfSeriesPoints', 'SeriesExponent', - 'SeriesStart', 'SeriesStep', 'SeriesUnit']: + for key in [ + 'IndicesMapToDataType', + 'NumberOfSeriesPoints', + 'SeriesExponent', + 'SeriesStart', + 'SeriesStep', + 'SeriesUnit', + ]: attr = _underscore(key) value = getattr(self, attr) if value is not None: @@ -1118,7 +1146,7 @@ def _to_xml_element(self): class Cifti2Matrix(xml.XmlSerializable, MutableSequence): - """ CIFTI-2 Matrix object + """CIFTI-2 Matrix object This is a list-like container where the elements are instances of :class:`Cifti2MatrixIndicesMap`. @@ -1137,6 +1165,7 @@ class Cifti2Matrix(xml.XmlSerializable, MutableSequence): For each matrix (data) dimension, exactly one MatrixIndicesMap element must list it in the AppliesToMatrixDimension attribute. """ + def __init__(self): self._mims = [] self.metadata = None @@ -1147,7 +1176,7 @@ def metadata(self): @metadata.setter def metadata(self, meta): - """ Set the metadata for this Cifti2Header + """Set the metadata for this Cifti2Header Parameters ---------- @@ -1161,10 +1190,7 @@ def metadata(self, meta): def _get_indices_from_mim(self, mim): applies_to_matrix_dimension = mim.applies_to_matrix_dimension - if not isinstance( - applies_to_matrix_dimension, - Iterable - ): + if not isinstance(applies_to_matrix_dimension, Iterable): applies_to_matrix_dimension = (int(applies_to_matrix_dimension),) return applies_to_matrix_dimension @@ -1200,24 +1226,23 @@ def get_index_map(self, index): a2md = self._get_indices_from_mim(v) if index in a2md: return v - raise Cifti2HeaderError("Index not mapped") + raise Cifti2HeaderError('Index not mapped') def _validate_new_mim(self, value): if value.applies_to_matrix_dimension is None: raise Cifti2HeaderError( - "Cifti2MatrixIndicesMap needs to have " - "the applies_to_matrix_dimension attribute set" + 'Cifti2MatrixIndicesMap needs to have ' + 'the applies_to_matrix_dimension attribute set' ) a2md = self._get_indices_from_mim(value) if not set(self.mapped_indices).isdisjoint(a2md): raise Cifti2HeaderError( - "Indices in this Cifti2MatrixIndicesMap " - "already mapped in this matrix" + 'Indices in this Cifti2MatrixIndicesMap ' 'already mapped in this matrix' ) def __setitem__(self, key, value): if not isinstance(value, Cifti2MatrixIndicesMap): - raise TypeError("Not a valid Cifti2MatrixIndicesMap instance") + raise TypeError('Not a valid Cifti2MatrixIndicesMap instance') self._validate_new_mim(value) self._mims[key] = value @@ -1232,7 +1257,7 @@ def __len__(self): def insert(self, index, value): if not isinstance(value, Cifti2MatrixIndicesMap): - raise TypeError("Not a valid Cifti2MatrixIndicesMap instance") + raise TypeError('Not a valid Cifti2MatrixIndicesMap instance') self._validate_new_mim(value) self._mims.insert(index, value) @@ -1261,6 +1286,7 @@ def get_axis(self, index): axis : :class:`.cifti2_axes.Axis` """ from . import cifti2_axes + return cifti2_axes.from_index_mapping(self.get_index_map(index)) def get_data_shape(self): @@ -1270,6 +1296,7 @@ def get_data_shape(self): Any dimensions omitted in the CIFTI-2 header will be given a default size of None. """ from . 
import cifti2_axes + if len(self.mapped_indices) == 0: return () base_shape = [None] * (max(self.mapped_indices) + 1) @@ -1281,9 +1308,9 @@ def get_data_shape(self): class Cifti2Header(FileBasedHeader, xml.XmlSerializable): - """ Class for CIFTI-2 header extension """ + """Class for CIFTI-2 header extension""" - def __init__(self, matrix=None, version="2.0"): + def __init__(self, matrix=None, version='2.0'): FileBasedHeader.__init__(self) xml.XmlSerializable.__init__(self) if matrix is None: @@ -1305,6 +1332,7 @@ def __eq__(self, other): @classmethod def may_contain_header(klass, binaryblock): from .parse_cifti2 import _Cifti2AsNiftiHeader + return _Cifti2AsNiftiHeader.may_contain_header(binaryblock) @property @@ -1370,26 +1398,23 @@ def from_axes(cls, axes): new header describing the rows/columns in a format consistent with Cifti2 """ from . import cifti2_axes + return cifti2_axes.to_header(axes) class Cifti2Image(DataobjImage, SerializableImage): - """ Class for single file CIFTI-2 format image - """ + """Class for single file CIFTI-2 format image""" + header_class = Cifti2Header valid_exts = Nifti2Image.valid_exts files_types = Nifti2Image.files_types makeable = False rw = True - def __init__(self, - dataobj=None, - header=None, - nifti_header=None, - extra=None, - file_map=None, - dtype=None): - """ Initialize image + def __init__( + self, dataobj=None, header=None, nifti_header=None, extra=None, file_map=None, dtype=None + ): + """Initialize image The image is a combination of (dataobj, header), with optional metadata in `nifti_header` (a NIfTI2 header). There may be more metadata in the @@ -1415,8 +1440,7 @@ def __init__(self, """ if not isinstance(header, Cifti2Header) and header: header = Cifti2Header.from_axes(header) - super(Cifti2Image, self).__init__(dataobj, header=header, - extra=extra, file_map=file_map) + super(Cifti2Image, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) self._nifti_header = LimitedNifti2Header.from_header(nifti_header) # if NIfTI header not specified, get data type from input array @@ -1427,8 +1451,10 @@ def __init__(self, self.update_headers() if self._dataobj.shape != self.header.matrix.get_data_shape(): - warn(f"Dataobj shape {self._dataobj.shape} does not match shape " - f"expected from CIFTI-2 header {self.header.matrix.get_data_shape()}") + warn( + f'Dataobj shape {self._dataobj.shape} does not match shape ' + f'expected from CIFTI-2 header {self.header.matrix.get_data_shape()}' + ) @property def nifti_header(self): @@ -1436,7 +1462,7 @@ def nifti_header(self): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Load a CIFTI-2 image from a file_map + """Load a CIFTI-2 image from a file_map Parameters ---------- @@ -1446,10 +1472,12 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): ------- img : Cifti2Image Returns a Cifti2Image - """ + """ from .parse_cifti2 import _Cifti2AsNiftiImage, Cifti2Extension - nifti_img = _Cifti2AsNiftiImage.from_file_map(file_map, mmap=mmap, - keep_file_open=keep_file_open) + + nifti_img = _Cifti2AsNiftiImage.from_file_map( + file_map, mmap=mmap, keep_file_open=keep_file_open + ) # Get cifti2 header for item in nifti_img.header.extensions: @@ -1457,20 +1485,21 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): cifti_header = item.get_content() break else: - raise ValueError('NIfTI2 header does not contain a CIFTI-2 ' - 'extension') + raise ValueError('NIfTI2 header does not contain a CIFTI-2 ' 'extension') # 
Construct cifti image. # Use array proxy object where possible dataobj = nifti_img.dataobj - return Cifti2Image(reshape_dataobj(dataobj, dataobj.shape[4:]), - header=cifti_header, - nifti_header=nifti_img.header, - file_map=file_map) + return Cifti2Image( + reshape_dataobj(dataobj, dataobj.shape[4:]), + header=cifti_header, + nifti_header=nifti_img.header, + file_map=file_map, + ) @classmethod def from_image(klass, img): - """ Class method to create new instance of own class from `img` + """Class method to create new instance of own class from `img` Parameters ---------- @@ -1487,7 +1516,7 @@ def from_image(klass, img): raise NotImplementedError def to_file_map(self, file_map=None, dtype=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Parameters ---------- @@ -1500,6 +1529,7 @@ def to_file_map(self, file_map=None, dtype=None): None """ from .parse_cifti2 import Cifti2Extension + self.update_headers() header = self._nifti_header extension = Cifti2Extension(content=self.header.to_xml()) @@ -1509,13 +1539,13 @@ def to_file_map(self, file_map=None, dtype=None): header.extensions.append(extension) if self._dataobj.shape != self.header.matrix.get_data_shape(): raise ValueError( - f"Dataobj shape {self._dataobj.shape} does not match shape " - f"expected from CIFTI-2 header {self.header.matrix.get_data_shape()}") + f'Dataobj shape {self._dataobj.shape} does not match shape ' + f'expected from CIFTI-2 header {self.header.matrix.get_data_shape()}' + ) # if intent code is not set, default to unknown CIFTI if header.get_intent()[0] == 'none': header.set_intent('NIFTI_INTENT_CONNECTIVITY_UNKNOWN') - data = reshape_dataobj(self.dataobj, - (1, 1, 1, 1) + self.dataobj.shape) + data = reshape_dataobj(self.dataobj, (1, 1, 1, 1) + self.dataobj.shape) # If qform not set, reset pixdim values so Nifti2 does not complain if header['qform_code'] == 0: header['pixdim'][:4] = 1 @@ -1523,7 +1553,7 @@ def to_file_map(self, file_map=None, dtype=None): img.to_file_map(file_map or self.file_map) def update_headers(self): - """ Harmonize NIfTI headers with image data + """Harmonize NIfTI headers with image data Ensures that the NIfTI-2 header records the data shape in the last three ``dim`` fields. Per the spec: diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 174222e189..31e4ab55ab 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -136,11 +136,13 @@ def from_index_mapping(mim): ------- axis : subclass of :class:`Axis` """ - return_type = {'CIFTI_INDEX_TYPE_SCALARS': ScalarAxis, - 'CIFTI_INDEX_TYPE_LABELS': LabelAxis, - 'CIFTI_INDEX_TYPE_SERIES': SeriesAxis, - 'CIFTI_INDEX_TYPE_BRAIN_MODELS': BrainModelAxis, - 'CIFTI_INDEX_TYPE_PARCELS': ParcelsAxis} + return_type = { + 'CIFTI_INDEX_TYPE_SCALARS': ScalarAxis, + 'CIFTI_INDEX_TYPE_LABELS': LabelAxis, + 'CIFTI_INDEX_TYPE_SERIES': SeriesAxis, + 'CIFTI_INDEX_TYPE_BRAIN_MODELS': BrainModelAxis, + 'CIFTI_INDEX_TYPE_PARCELS': ParcelsAxis, + } return return_type[mim.indices_map_to_data_type].from_index_mapping(mim) @@ -242,8 +244,9 @@ class BrainModelAxis(Axis): This Axis describes which vertex/voxel is represented by each row/column. 
""" - def __init__(self, name, voxel=None, vertex=None, affine=None, - volume_shape=None, nvertices=None): + def __init__( + self, name, voxel=None, vertex=None, affine=None, volume_shape=None, nvertices=None + ): """ New BrainModelAxis axes can be constructed by passing on the greyordinate brain-structure names and voxel/vertex indices to the constructor or by one of the @@ -275,7 +278,7 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, """ if voxel is None: if vertex is None: - raise ValueError("At least one of voxel or vertex indices should be defined") + raise ValueError('At least one of voxel or vertex indices should be defined') nelements = len(vertex) self.voxel = np.full((nelements, 3), fill_value=-1, dtype=int) else: @@ -294,8 +297,10 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, if nvertices is None: self.nvertices = {} else: - self.nvertices = {self.to_cifti_brain_structure_name(name): number - for name, number in nvertices.items()} + self.nvertices = { + self.to_cifti_brain_structure_name(name): number + for name, number in nvertices.items() + } for name in list(self.nvertices.keys()): if name not in self.name: @@ -307,8 +312,10 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, self.volume_shape = None else: if affine is None or volume_shape is None: - raise ValueError("Affine and volume shape should be defined " - "for BrainModelAxis containing voxels") + raise ValueError( + 'Affine and volume shape should be defined ' + 'for BrainModelAxis containing voxels' + ) self.affine = np.asanyarray(affine) self.volume_shape = volume_shape @@ -318,10 +325,12 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, raise ValueError('Undefined voxel indices found for volumetric elements') for check_name in ('name', 'voxel', 'vertex'): - shape = (self.size, 3) if check_name == 'voxel' else (self.size, ) + shape = (self.size, 3) if check_name == 'voxel' else (self.size,) if getattr(self, check_name).shape != shape: - raise ValueError(f"Input {check_name} has incorrect shape " - f"({getattr(self, check_name).shape}) for BrainModelAxis axis") + raise ValueError( + f'Input {check_name} has incorrect shape ' + f'({getattr(self, check_name).shape}) for BrainModelAxis axis' + ) @classmethod def from_mask(cls, mask, name='other', affine=None): @@ -348,7 +357,9 @@ def from_mask(cls, mask, name='other', affine=None): else: affine = np.asanyarray(affine) if affine.shape != (4, 4): - raise ValueError(f"Affine transformation should be a 4x4 array or None, not {affine!r}") + raise ValueError( + f'Affine transformation should be a 4x4 array or None, not {affine!r}' + ) mask = np.asanyarray(mask) if mask.ndim == 1: @@ -357,8 +368,10 @@ def from_mask(cls, mask, name='other', affine=None): voxels = np.array(np.where(mask != 0)).T return cls(name, voxel=voxels, affine=affine, volume_shape=mask.shape) else: - raise ValueError("Mask should be either 1-dimensional (for surfaces) or " - "3-dimensional (for volumes), not %i-dimensional" % mask.ndim) + raise ValueError( + 'Mask should be either 1-dimensional (for surfaces) or ' + '3-dimensional (for volumes), not %i-dimensional' % mask.ndim + ) @classmethod def from_surface(cls, vertices, nvertex, name='Other'): @@ -379,8 +392,7 @@ def from_surface(cls, vertices, nvertex, name='Other'): BrainModelAxis which covers (part of) the surface """ cifti_name = cls.to_cifti_brain_structure_name(name) - return cls(cifti_name, vertex=vertices, - nvertices={cifti_name: nvertex}) + return cls(cifti_name, 
vertex=vertices, nvertices={cifti_name: nvertex}) @classmethod def from_index_mapping(cls, mim): @@ -407,10 +419,10 @@ def from_index_mapping(cls, mim): is_surface = bm.model_type == 'CIFTI_MODEL_TYPE_SURFACE' name.extend([bm.brain_structure] * bm.index_count) if is_surface: - vertex[bm.index_offset: index_end] = bm.vertex_indices + vertex[bm.index_offset : index_end] = bm.vertex_indices nvertices[bm.brain_structure] = bm.surface_number_of_vertices else: - voxel[bm.index_offset: index_end, :] = bm.voxel_indices_ijk + voxel[bm.index_offset : index_end, :] = bm.voxel_indices_ijk if affine is None: shape = mim.volume.volume_dimensions affine = mim.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix @@ -444,9 +456,13 @@ def to_mapping(self, dim): affine = cifti2.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, self.affine) mim.volume = cifti2.Cifti2Volume(self.volume_shape, affine) cifti_bm = cifti2.Cifti2BrainModel( - to_slice.start, len(bm), - 'CIFTI_MODEL_TYPE_SURFACE' if is_surface else 'CIFTI_MODEL_TYPE_VOXELS', - name, nvertex, voxels, vertices + to_slice.start, + len(bm), + 'CIFTI_MODEL_TYPE_SURFACE' if is_surface else 'CIFTI_MODEL_TYPE_VOXELS', + name, + nvertex, + voxels, + vertices, ) mim.append(cifti_bm) return mim @@ -466,7 +482,7 @@ def iter_structures(self): start_name = self.name[idx_start] for idx_current, name in enumerate(self.name): if start_name != name: - yield start_name, slice(idx_start, idx_current), self[idx_start: idx_current] + yield start_name, slice(idx_start, idx_current), self[idx_start:idx_current] idx_start = idx_current start_name = self.name[idx_start] yield start_name, slice(idx_start, None), self[idx_start:] @@ -518,14 +534,14 @@ def to_cifti_brain_structure_name(name): if poss_orient == name.lower()[:idx]: orientation = poss_orient if name[idx] in '_ ': - structure = name[idx + 1:] + structure = name[idx + 1 :] else: structure = name[idx:] break if poss_orient == name.lower()[-idx:]: orientation = poss_orient if name[-idx - 1] in '_ ': - structure = name[:-idx - 1] + structure = name[: -idx - 1] else: structure = name[:-idx] break @@ -537,8 +553,10 @@ def to_cifti_brain_structure_name(name): else: proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}_{orientation.upper()}' if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES: - raise ValueError(f'{name} was interpreted as {proposed_name}, which is not ' - 'a valid CIFTI brain structure') + raise ValueError( + f'{name} was interpreted as {proposed_name}, which is not ' + 'a valid CIFTI brain structure' + ) return proposed_name @property @@ -586,17 +604,16 @@ def volume_shape(self, value): if value is not None: value = tuple(value) if len(value) != 3: - raise ValueError("Volume shape should be a tuple of length 3") + raise ValueError('Volume shape should be a tuple of length 3') if not all(isinstance(v, int) for v in value): - raise ValueError("All elements of the volume shape should be integers") + raise ValueError('All elements of the volume shape should be integers') self._volume_shape = value _name = None @property def name(self): - """The brain structure to which the voxel/vertices of belong - """ + """The brain structure to which the voxel/vertices of belong""" return self._name @name.setter @@ -612,13 +629,15 @@ def __eq__(self, other): if xor(self.affine is None, other.affine is None): return False return ( - (self.affine is None or - np.allclose(self.affine, other.affine) and - self.volume_shape == other.volume_shape) and - self.nvertices == other.nvertices and - 
np.array_equal(self.name, other.name) and - np.array_equal(self.voxel[self.volume_mask], other.voxel[other.volume_mask]) and - np.array_equal(self.vertex[self.surface_mask], other.vertex[other.surface_mask]) + ( + self.affine is None + or np.allclose(self.affine, other.affine) + and self.volume_shape == other.volume_shape + ) + and self.nvertices == other.nvertices + and np.array_equal(self.name, other.name) + and np.array_equal(self.voxel[self.volume_mask], other.voxel[other.volume_mask]) + and np.array_equal(self.vertex[self.surface_mask], other.vertex[other.surface_mask]) ) def __add__(self, other): @@ -641,23 +660,27 @@ def __add__(self, other): else: affine, shape = self.affine, self.volume_shape if other.affine is not None and ( - not np.allclose(other.affine, affine) or - other.volume_shape != shape + not np.allclose(other.affine, affine) or other.volume_shape != shape ): - raise ValueError("Trying to concatenate two BrainModels defined " - "in a different brain volume") + raise ValueError( + 'Trying to concatenate two BrainModels defined ' 'in a different brain volume' + ) nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): if name in nvertices.keys() and nvertices[name] != value: - raise ValueError("Trying to concatenate two BrainModels with " - f"inconsistent number of vertices for {name}") + raise ValueError( + 'Trying to concatenate two BrainModels with ' + f'inconsistent number of vertices for {name}' + ) nvertices[name] = value return self.__class__( - np.append(self.name, other.name), - np.concatenate((self.voxel, other.voxel), 0), - np.append(self.vertex, other.vertex), - affine, shape, nvertices + np.append(self.name, other.name), + np.concatenate((self.voxel, other.voxel), 0), + np.append(self.vertex, other.vertex), + affine, + shape, + nvertices, ) def __getitem__(self, item): @@ -680,9 +703,15 @@ def __getitem__(self, item): if isinstance(item, int): return self.get_element(item) if isinstance(item, str): - raise IndexError("Can not index an Axis with a string (except for ParcelsAxis)") - return self.__class__(self.name[item], self.voxel[item], self.vertex[item], - self.affine, self.volume_shape, self.nvertices) + raise IndexError('Can not index an Axis with a string (except for ParcelsAxis)') + return self.__class__( + self.name[item], + self.voxel[item], + self.vertex[item], + self.affine, + self.volume_shape, + self.nvertices, + ) def get_element(self, index): """ @@ -758,13 +787,17 @@ def __init__(self, name, voxels, vertices, affine=None, volume_shape=None, nvert if nvertices is None: self.nvertices = {} else: - self.nvertices = {BrainModelAxis.to_cifti_brain_structure_name(name): number - for name, number in nvertices.items()} + self.nvertices = { + BrainModelAxis.to_cifti_brain_structure_name(name): number + for name, number in nvertices.items() + } for check_name in ('name', 'voxels', 'vertices'): - if getattr(self, check_name).shape != (self.size, ): - raise ValueError(f"Input {check_name} has incorrect shape " - f"({getattr(self, check_name).shape}) for Parcel axis") + if getattr(self, check_name).shape != (self.size,): + raise ValueError( + f'Input {check_name} has incorrect shape ' + f'({getattr(self, check_name).shape}) for Parcel axis' + ) @classmethod def from_brain_models(cls, named_brain_models): @@ -796,16 +829,20 @@ def from_brain_models(cls, named_brain_models): affine = bm.affine volume_shape = bm.volume_shape elif not np.allclose(affine, bm.affine) or (volume_shape != bm.volume_shape): - raise ValueError("Can not combine 
brain models defined in different " - "volumes into a single Parcel axis") + raise ValueError( + 'Can not combine brain models defined in different ' + 'volumes into a single Parcel axis' + ) all_voxels[idx_parcel] = voxels vertices = {} for name, _, bm_part in bm.iter_structures(): if name in bm.nvertices.keys(): if name in nvertices.keys() and nvertices[name] != bm.nvertices[name]: - raise ValueError("Got multiple conflicting number of " - f"vertices for surface structure {name}") + raise ValueError( + 'Got multiple conflicting number of ' + f'vertices for surface structure {name}' + ) nvertices[name] = bm.nvertices[name] vertices[name] = bm_part.vertex all_vertices[idx_parcel] = vertices @@ -846,7 +883,9 @@ def from_index_mapping(cls, mim): name = vertex.brain_structure vertices[vertex.brain_structure] = np.array(vertex) if name not in nvertices.keys(): - raise ValueError(f"Number of vertices for surface structure {name} not defined") + raise ValueError( + f'Number of vertices for surface structure {name} not defined' + ) all_voxels[idx_parcel] = voxels all_vertices[idx_parcel] = vertices all_names.append(parcel.name) @@ -910,25 +949,28 @@ def volume_shape(self, value): if value is not None: value = tuple(value) if len(value) != 3: - raise ValueError("Volume shape should be a tuple of length 3") + raise ValueError('Volume shape should be a tuple of length 3') if not all(isinstance(v, int) for v in value): - raise ValueError("All elements of the volume shape should be integers") + raise ValueError('All elements of the volume shape should be integers') self._volume_shape = value def __len__(self): return self.name.size def __eq__(self, other): - if (self.__class__ != other.__class__ or len(self) != len(other) or - not np.array_equal(self.name, other.name) or self.nvertices != other.nvertices or - any(not np.array_equal(vox1, vox2) - for vox1, vox2 in zip(self.voxels, other.voxels))): + if ( + self.__class__ != other.__class__ + or len(self) != len(other) + or not np.array_equal(self.name, other.name) + or self.nvertices != other.nvertices + or any(not np.array_equal(vox1, vox2) for vox1, vox2 in zip(self.voxels, other.voxels)) + ): return False if self.affine is not None: if ( - other.affine is None or - not np.allclose(self.affine, other.affine) or - self.volume_shape != other.volume_shape + other.affine is None + or not np.allclose(self.affine, other.affine) + or self.volume_shape != other.volume_shape ): return False elif other.affine is not None: @@ -960,21 +1002,27 @@ def __add__(self, other): affine, shape = other.affine, other.volume_shape else: affine, shape = self.affine, self.volume_shape - if other.affine is not None and (not np.allclose(other.affine, affine) or - other.volume_shape != shape): - raise ValueError("Trying to concatenate two ParcelsAxis defined " - "in a different brain volume") + if other.affine is not None and ( + not np.allclose(other.affine, affine) or other.volume_shape != shape + ): + raise ValueError( + 'Trying to concatenate two ParcelsAxis defined ' 'in a different brain volume' + ) nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): if name in nvertices.keys() and nvertices[name] != value: - raise ValueError("Trying to concatenate two ParcelsAxis with " - f"inconsistent number of vertices for {name}") + raise ValueError( + 'Trying to concatenate two ParcelsAxis with ' + f'inconsistent number of vertices for {name}' + ) nvertices[name] = value return self.__class__( - np.append(self.name, other.name), - np.append(self.voxels, 
other.voxels), - np.append(self.vertices, other.vertices), - affine, shape, nvertices + np.append(self.name, other.name), + np.append(self.voxels, other.voxels), + np.append(self.vertices, other.vertices), + affine, + shape, + nvertices, ) def __getitem__(self, item): @@ -988,14 +1036,20 @@ def __getitem__(self, item): if isinstance(item, str): idx = np.where(self.name == item)[0] if len(idx) == 0: - raise IndexError(f"Parcel {item} not found") + raise IndexError(f'Parcel {item} not found') if len(idx) > 1: - raise IndexError(f"Multiple parcels with name {item} found") + raise IndexError(f'Multiple parcels with name {item} found') return self.voxels[idx[0]], self.vertices[idx[0]] if isinstance(item, int): return self.get_element(item) - return self.__class__(self.name[item], self.voxels[item], self.vertices[item], - self.affine, self.volume_shape, self.nvertices) + return self.__class__( + self.name[item], + self.voxels[item], + self.vertices[item], + self.affine, + self.volume_shape, + self.nvertices, + ) def get_element(self, index): """ @@ -1039,9 +1093,11 @@ def __init__(self, name, meta=None): self.meta = np.asanyarray(meta, dtype='object') for check_name in ('name', 'meta'): - if getattr(self, check_name).shape != (self.size, ): - raise ValueError(f"Input {check_name} has incorrect shape " - f"({getattr(self, check_name).shape}) for ScalarAxis axis") + if getattr(self, check_name).shape != (self.size,): + raise ValueError( + f'Input {check_name} has incorrect shape ' + f'({getattr(self, check_name).shape}) for ScalarAxis axis' + ) @classmethod def from_index_mapping(cls, mim): @@ -1115,8 +1171,8 @@ def __add__(self, other): if not isinstance(other, ScalarAxis): return NotImplemented return ScalarAxis( - np.append(self.name, other.name), - np.append(self.meta, other.meta), + np.append(self.name, other.name), + np.append(self.meta, other.meta), ) def __getitem__(self, item): @@ -1172,9 +1228,11 @@ def __init__(self, name, label, meta=None): self.meta = np.asanyarray(meta, dtype='object') for check_name in ('name', 'meta', 'label'): - if getattr(self, check_name).shape != (self.size, ): - raise ValueError(f"Input {check_name} has incorrect shape " - f"({getattr(self, check_name).shape}) for LabelAxis axis") + if getattr(self, check_name).shape != (self.size,): + raise ValueError( + f'Input {check_name} has incorrect shape ' + f'({getattr(self, check_name).shape}) for LabelAxis axis' + ) @classmethod def from_index_mapping(cls, mim): @@ -1189,8 +1247,10 @@ def from_index_mapping(cls, mim): ------- LabelAxis """ - tables = [{key: (value.label, value.rgba) for key, value in nm.label_table.items()} - for nm in mim.named_maps] + tables = [ + {key: (value.label, value.rgba) for key, value in nm.label_table.items()} + for nm in mim.named_maps + ] rest = ScalarAxis.from_index_mapping(mim) return LabelAxis(rest.name, tables, rest.meta) @@ -1212,8 +1272,7 @@ def to_mapping(self, dim): label_table = cifti2.Cifti2LabelTable() for key, value in label.items(): label_table[key] = (value[0],) + tuple(value[1]) - named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta), - label_table) + named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta), label_table) mim.append(named_map) return mim @@ -1236,9 +1295,9 @@ def __eq__(self, other): if not isinstance(other, LabelAxis) or self.size != other.size: return False return ( - np.array_equal(self.name, other.name) and - np.array_equal(self.meta, other.meta) and - np.array_equal(self.label, other.label) + np.array_equal(self.name, 
other.name) + and np.array_equal(self.meta, other.meta) + and np.array_equal(self.label, other.label) ) def __add__(self, other): @@ -1257,9 +1316,9 @@ def __add__(self, other): if not isinstance(other, LabelAxis): return NotImplemented return LabelAxis( - np.append(self.name, other.name), - np.append(self.label, other.label), - np.append(self.meta, other.meta), + np.append(self.name, other.name), + np.append(self.label, other.label), + np.append(self.meta, other.meta), ) def __getitem__(self, item): @@ -1292,9 +1351,10 @@ class SeriesAxis(Axis): This Axis describes the time point of each row/column. """ + size = None - def __init__(self, start, step, size, unit="SECOND"): + def __init__(self, start, step, size, unit='SECOND'): """ Creates a new SeriesAxis axis @@ -1331,8 +1391,8 @@ def from_index_mapping(cls, mim): ------- SeriesAxis """ - start = mim.series_start * 10 ** mim.series_exponent - step = mim.series_step * 10 ** mim.series_exponent + start = mim.series_start * 10**mim.series_exponent + step = mim.series_step * 10**mim.series_exponent return cls(start, step, mim.number_of_series_points, mim.series_unit) def to_mapping(self, dim): @@ -1364,9 +1424,10 @@ def unit(self): @unit.setter def unit(self, value): - if value.upper() not in ("SECOND", "HERTZ", "METER", "RADIAN"): - raise ValueError("SeriesAxis unit should be one of " + - "('second', 'hertz', 'meter', or 'radian'") + if value.upper() not in ('SECOND', 'HERTZ', 'METER', 'RADIAN'): + raise ValueError( + 'SeriesAxis unit should be one of ' + "('second', 'hertz', 'meter', or 'radian'" + ) self._unit = value.upper() def __len__(self): @@ -1377,11 +1438,11 @@ def __eq__(self, other): True if start, step, size, and unit are the same. """ return ( - isinstance(other, SeriesAxis) and - self.start == other.start and - self.step == other.step and - self.size == other.size and - self.unit == other.unit + isinstance(other, SeriesAxis) + and self.start == other.start + and self.step == other.step + and self.size == other.size + and self.unit == other.unit ) def __add__(self, other): @@ -1415,12 +1476,16 @@ def __add__(self, other): def __getitem__(self, item): if isinstance(item, slice): step = 1 if item.step is None else item.step - idx_start = ((self.size - 1 if step < 0 else 0) - if item.start is None else - (item.start if item.start >= 0 else self.size + item.start)) - idx_end = ((-1 if step < 0 else self.size) - if item.stop is None else - (item.stop if item.stop >= 0 else self.size + item.stop)) + idx_start = ( + (self.size - 1 if step < 0 else 0) + if item.start is None + else (item.start if item.start >= 0 else self.size + item.start) + ) + idx_end = ( + (-1 if step < 0 else self.size) + if item.stop is None + else (item.stop if item.stop >= 0 else self.size + item.stop) + ) if idx_start > self.size and step < 0: idx_start = self.size - 1 if idx_end > self.size: @@ -1428,12 +1493,15 @@ def __getitem__(self, item): nelements = (idx_end - idx_start) // step if nelements < 0: nelements = 0 - return SeriesAxis(idx_start * self.step + self.start, self.step * step, - nelements, self.unit) + return SeriesAxis( + idx_start * self.step + self.start, self.step * step, nelements, self.unit + ) elif isinstance(item, int): return self.get_element(item) - raise IndexError('SeriesAxis can only be indexed with integers or slices ' - 'without breaking the regular structure') + raise IndexError( + 'SeriesAxis can only be indexed with integers or slices ' + 'without breaking the regular structure' + ) def get_element(self, index): """ @@ -1452,6 
+1520,8 @@ def get_element(self, index): if index < 0: index = self.size + index if index >= self.size or index < 0: - raise IndexError("index %i is out of range for SeriesAxis with size %i" % - (original_index, self.size)) + raise IndexError( + 'index %i is out of range for SeriesAxis with size %i' + % (original_index, self.size) + ) return self.start + self.step * index diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index a3ed49711d..36db0fa290 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -12,13 +12,27 @@ from packaging.version import Version, parse -from .cifti2 import (Cifti2MetaData, Cifti2Header, Cifti2Label, - Cifti2LabelTable, Cifti2VertexIndices, - Cifti2VoxelIndicesIJK, Cifti2BrainModel, Cifti2Matrix, - Cifti2MatrixIndicesMap, Cifti2NamedMap, Cifti2Parcel, - Cifti2Surface, Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, - Cifti2Vertices, Cifti2Volume, CIFTI_BRAIN_STRUCTURES, - CIFTI_MODEL_TYPES, _underscore, Cifti2HeaderError) +from .cifti2 import ( + Cifti2MetaData, + Cifti2Header, + Cifti2Label, + Cifti2LabelTable, + Cifti2VertexIndices, + Cifti2VoxelIndicesIJK, + Cifti2BrainModel, + Cifti2Matrix, + Cifti2MatrixIndicesMap, + Cifti2NamedMap, + Cifti2Parcel, + Cifti2Surface, + Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + Cifti2Vertices, + Cifti2Volume, + CIFTI_BRAIN_STRUCTURES, + CIFTI_MODEL_TYPES, + _underscore, + Cifti2HeaderError, +) from .. import xmlutils as xml from ..spatialimages import HeaderDataError from ..batteryrunners import Report @@ -44,51 +58,42 @@ def _mangle(self, value): return value.to_xml() -extension_codes.add_codes(( - (Cifti2Extension.code, 'cifti', Cifti2Extension),)) - -intent_codes.add_codes(( - # The codes below appear on the CIFTI-2 standard - # http://www.nitrc.org/plugins/mwiki/index.php/cifti:ConnectivityMatrixFileFormats - # https://www.nitrc.org/forum/attachment.php?attachid=341&group_id=454&forum_id=1955 - (3000, 'ConnUnknown', (), 'NIFTI_INTENT_CONNECTIVITY_UNKNOWN'), - (3001, 'ConnDense', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE'), - (3002, 'ConnDenseSeries', (), - 'NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES'), - (3003, 'ConnParcels', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED'), - (3004, 'ConnParcelSries', (), - "NIFTI_INTENT_CONNECTIVITY_PARCELLATED_SERIES"), - (3006, 'ConnDenseScalar', (), - 'NIFTI_INTENT_CONNECTIVITY_DENSE_SCALARS'), - (3007, 'ConnDenseLabel', (), - 'NIFTI_INTENT_CONNECTIVITY_DENSE_LABELS'), - (3008, 'ConnParcelScalr', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_SCALAR'), - (3009, 'ConnParcelDense', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_DENSE'), - (3010, 'ConnDenseParcel', (), - 'NIFTI_INTENT_CONNECTIVITY_DENSE_PARCELLATED'), - (3011, 'ConnPPSr', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SERIES'), - (3012, 'ConnPPSc', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SCALAR'))) +extension_codes.add_codes(((Cifti2Extension.code, 'cifti', Cifti2Extension),)) + +intent_codes.add_codes( + ( + # The codes below appear on the CIFTI-2 standard + # http://www.nitrc.org/plugins/mwiki/index.php/cifti:ConnectivityMatrixFileFormats + # https://www.nitrc.org/forum/attachment.php?attachid=341&group_id=454&forum_id=1955 + (3000, 'ConnUnknown', (), 'NIFTI_INTENT_CONNECTIVITY_UNKNOWN'), + (3001, 'ConnDense', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE'), + (3002, 'ConnDenseSeries', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES'), + (3003, 'ConnParcels', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED'), + (3004, 'ConnParcelSries', (), 
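
# Illustrative aside, not part of the patch: SeriesAxis (reformatted above)
# never stores sample times, only (start, step, size, unit); get_element and
# slicing are pure arithmetic on that triple.  A hedged sketch:

from nibabel.cifti2 import cifti2_axes as axes

sr = axes.SeriesAxis(start=3, step=10, size=4)   # samples at 3, 13, 23, 33
assert sr.get_element(2) == 23
sub = sr[1:3]                                    # a new SeriesAxis(13, 10, 2)
assert (sub.start, sub.step, sub.size) == (13, 10, 2)
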
'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_SERIES'), + (3006, 'ConnDenseScalar', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE_SCALARS'), + (3007, 'ConnDenseLabel', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE_LABELS'), + (3008, 'ConnParcelScalr', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_SCALAR'), + (3009, 'ConnParcelDense', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_DENSE'), + (3010, 'ConnDenseParcel', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE_PARCELLATED'), + (3011, 'ConnPPSr', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SERIES'), + (3012, 'ConnPPSc', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SCALAR'), + ) +) class _Cifti2AsNiftiHeader(Nifti2Header): - """ Class for Cifti2 header extension """ + """Class for Cifti2 header extension""" @classmethod def _valid_intent_code(klass, intent_code): - """ Return True if `intent_code` matches our class `klass` - """ + """Return True if `intent_code` matches our class `klass`""" return intent_code >= 3000 and intent_code < 3100 @classmethod def may_contain_header(klass, binaryblock): if not super(_Cifti2AsNiftiHeader, klass).may_contain_header(binaryblock): return False - hdr = klass(binaryblock=binaryblock[:klass.sizeof_hdr]) + hdr = klass(binaryblock=binaryblock[: klass.sizeof_hdr]) return klass._valid_intent_code(hdr.get_intent('code')[0]) @staticmethod @@ -120,17 +125,19 @@ def _chk_pixdims(hdr, fix=False): class _Cifti2AsNiftiImage(Nifti2Image): - """ Load a NIfTI2 image with a Cifti2 header """ + """Load a NIfTI2 image with a Cifti2 header""" + header_class = _Cifti2AsNiftiHeader makeable = False class Cifti2Parser(xml.XmlParser): """Class to parse an XML string into a CIFTI-2 header object""" + def __init__(self, encoding=None, buffer_size=3500000, verbose=0): - super(Cifti2Parser, self).__init__(encoding=encoding, - buffer_size=buffer_size, - verbose=verbose) + super(Cifti2Parser, self).__init__( + encoding=encoding, buffer_size=buffer_size, verbose=verbose + ) self.fsm_state = [] self.struct_state = [] @@ -152,7 +159,7 @@ def StartElementHandler(self, name, attrs): # create cifti2 image self.header = Cifti2Header() self.header.version = ver = attrs['Version'] - if parse(ver) < Version("2"): + if parse(ver) < Version('2'): raise ValueError(f'Only CIFTI-2 files are supported; found version {ver}') self.fsm_state.append('CIFTI') self.struct_state.append(self.header) @@ -193,15 +200,18 @@ def StartElementHandler(self, name, attrs): elif name == 'MatrixIndicesMap': self.fsm_state.append('MatrixIndicesMap') - dimensions = [int(value) for value in attrs["AppliesToMatrixDimension"].split(',')] + dimensions = [int(value) for value in attrs['AppliesToMatrixDimension'].split(',')] mim = Cifti2MatrixIndicesMap( applies_to_matrix_dimension=dimensions, - indices_map_to_data_type=attrs["IndicesMapToDataType"]) - for key, dtype in [("NumberOfSeriesPoints", int), - ("SeriesExponent", int), - ("SeriesStart", float), - ("SeriesStep", float), - ("SeriesUnit", str)]: + indices_map_to_data_type=attrs['IndicesMapToDataType'], + ) + for key, dtype in [ + ('NumberOfSeriesPoints', int), + ('SeriesExponent', int), + ('SeriesStart', float), + ('SeriesStep', float), + ('SeriesUnit', str), + ]: if key in attrs: setattr(mim, _underscore(key), dtype(attrs[key])) matrix = self.struct_state[-1] @@ -226,7 +236,7 @@ def StartElementHandler(self, name, attrs): elif name == 'LabelTable': named_map = self.struct_state[-1] mim = self.struct_state[-2] - if mim.indices_map_to_data_type != "CIFTI_INDEX_TYPE_LABELS": + if mim.indices_map_to_data_type != 'CIFTI_INDEX_TYPE_LABELS': 
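
# Illustrative aside, not part of the patch: Cifti2Parser is a push-down
# automaton over expat events; StartElementHandler pushes onto the paired
# fsm_state/struct_state stacks and EndElementHandler pops them.  A hedged,
# schema-free sketch of the same pattern (element names are made up):

from xml.parsers import expat

stack = []
parser = expat.ParserCreate()
parser.StartElementHandler = lambda name, attrs: stack.append(name)
parser.EndElementHandler = lambda name: stack.pop()
parser.Parse('<CIFTI><Matrix><MetaData/></Matrix></CIFTI>', True)
assert stack == []                   # every element was opened and closed
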
raise Cifti2HeaderError( 'LabelTable element can only be a child of a MatrixIndicesMap ' 'with CIFTI_INDEX_TYPE_LABELS type' @@ -247,16 +257,16 @@ def StartElementHandler(self, name, attrs): 'Label element can only be a child of the CIFTI-2 LabelTable element' ) label = Cifti2Label() - label.key = int(attrs["Key"]) - label.red = float(attrs["Red"]) - label.green = float(attrs["Green"]) - label.blue = float(attrs["Blue"]) - label.alpha = float(attrs["Alpha"]) + label.key = int(attrs['Key']) + label.red = float(attrs['Red']) + label.green = float(attrs['Green']) + label.blue = float(attrs['Blue']) + label.alpha = float(attrs['Alpha']) self.write_to = 'Label' self.fsm_state.append('Label') self.struct_state.append(label) - elif name == "MapName": + elif name == 'MapName': named_map = self.struct_state[-1] if not isinstance(named_map, Cifti2NamedMap): raise Cifti2HeaderError( @@ -266,52 +276,50 @@ def StartElementHandler(self, name, attrs): self.fsm_state.append('MapName') self.write_to = 'MapName' - elif name == "Surface": + elif name == 'Surface': surface = Cifti2Surface() mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( 'Surface element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) - if mim.indices_map_to_data_type != "CIFTI_INDEX_TYPE_PARCELS": + if mim.indices_map_to_data_type != 'CIFTI_INDEX_TYPE_PARCELS': raise Cifti2HeaderError( 'Surface element can only be a child of a MatrixIndicesMap ' 'with CIFTI_INDEX_TYPE_PARCELS type' ) - surface.brain_structure = attrs["BrainStructure"] - surface.surface_number_of_vertices = int(attrs["SurfaceNumberOfVertices"]) + surface.brain_structure = attrs['BrainStructure'] + surface.surface_number_of_vertices = int(attrs['SurfaceNumberOfVertices']) mim.append(surface) - elif name == "Parcel": + elif name == 'Parcel': parcel = Cifti2Parcel() mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( 'Parcel element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) - parcel.name = attrs["Name"] + parcel.name = attrs['Name'] mim.append(parcel) self.fsm_state.append('Parcel') self.struct_state.append(parcel) - elif name == "Vertices": + elif name == 'Vertices': vertices = Cifti2Vertices() parcel = self.struct_state[-1] if not isinstance(parcel, Cifti2Parcel): raise Cifti2HeaderError( 'Vertices element can only be a child of the CIFTI-2 Parcel element' ) - vertices.brain_structure = attrs["BrainStructure"] + vertices.brain_structure = attrs['BrainStructure'] if vertices.brain_structure not in CIFTI_BRAIN_STRUCTURES: - raise Cifti2HeaderError( - 'BrainStructure for this Vertices element is not valid' - ) + raise Cifti2HeaderError('BrainStructure for this Vertices element is not valid') parcel.append_cifti_vertices(vertices) self.fsm_state.append('Vertices') self.struct_state.append(vertices) self.write_to = 'Vertices' - elif name == "VoxelIndicesIJK": + elif name == 'VoxelIndicesIJK': parent = self.struct_state[-1] if not isinstance(parent, (Cifti2Parcel, Cifti2BrainModel)): raise Cifti2HeaderError( @@ -321,20 +329,19 @@ def StartElementHandler(self, name, attrs): parent.voxel_indices_ijk = Cifti2VoxelIndicesIJK() self.write_to = 'VoxelIndices' - elif name == "Volume": + elif name == 'Volume': mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( 'Volume element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) - dimensions = tuple([int(val) for val in - 
attrs["VolumeDimensions"].split(',')]) + dimensions = tuple([int(val) for val in attrs['VolumeDimensions'].split(',')]) volume = Cifti2Volume(volume_dimensions=dimensions) mim.append(volume) self.fsm_state.append('Volume') self.struct_state.append(volume) - elif name == "TransformationMatrixVoxelIndicesIJKtoXYZ": + elif name == 'TransformationMatrixVoxelIndicesIJKtoXYZ': volume = self.struct_state[-1] if not isinstance(volume, Cifti2Volume): raise Cifti2HeaderError( @@ -342,13 +349,13 @@ def StartElementHandler(self, name, attrs): 'of the CIFTI-2 Volume element' ) transform = Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ() - transform.meter_exponent = int(attrs["MeterExponent"]) + transform.meter_exponent = int(attrs['MeterExponent']) volume.transformation_matrix_voxel_indices_ijk_to_xyz = transform self.fsm_state.append('TransformMatrix') self.struct_state.append(transform) self.write_to = 'TransformMatrix' - elif name == "BrainModel": + elif name == 'BrainModel': model = Cifti2BrainModel() mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): @@ -356,31 +363,29 @@ def StartElementHandler(self, name, attrs): 'BrainModel element can only be a child ' 'of the CIFTI-2 MatrixIndicesMap element' ) - if mim.indices_map_to_data_type != "CIFTI_INDEX_TYPE_BRAIN_MODELS": + if mim.indices_map_to_data_type != 'CIFTI_INDEX_TYPE_BRAIN_MODELS': raise Cifti2HeaderError( 'BrainModel element can only be a child of a MatrixIndicesMap ' 'with CIFTI_INDEX_TYPE_BRAIN_MODELS type' ) - for key, dtype in [("IndexOffset", int), - ("IndexCount", int), - ("ModelType", str), - ("BrainStructure", str), - ("SurfaceNumberOfVertices", int)]: + for key, dtype in [ + ('IndexOffset', int), + ('IndexCount', int), + ('ModelType', str), + ('BrainStructure', str), + ('SurfaceNumberOfVertices', int), + ]: if key in attrs: setattr(model, _underscore(key), dtype(attrs[key])) if model.brain_structure not in CIFTI_BRAIN_STRUCTURES: - raise Cifti2HeaderError( - 'BrainStructure for this BrainModel element is not valid' - ) + raise Cifti2HeaderError('BrainStructure for this BrainModel element is not valid') if model.model_type not in CIFTI_MODEL_TYPES: - raise Cifti2HeaderError( - 'ModelType for this BrainModel element is not valid' - ) + raise Cifti2HeaderError('ModelType for this BrainModel element is not valid') mim.append(model) self.fsm_state.append('BrainModel') self.struct_state.append(model) - elif name == "VertexIndices": + elif name == 'VertexIndices': index = Cifti2VertexIndices() model = self.struct_state[-1] if not isinstance(model, Cifti2BrainModel): @@ -391,7 +396,7 @@ def StartElementHandler(self, name, attrs): self.fsm_state.append('VertexIndices') model.vertex_indices = index self.struct_state.append(index) - self.write_to = "VertexIndices" + self.write_to = 'VertexIndices' def EndElementHandler(self, name): self.flush_chardata() @@ -444,42 +449,42 @@ def EndElementHandler(self, name): lata.append(label) self.write_to = None - elif name == "MapName": + elif name == 'MapName': self.fsm_state.pop() self.write_to = None - elif name == "Parcel": + elif name == 'Parcel': self.fsm_state.pop() self.struct_state.pop() - elif name == "Vertices": + elif name == 'Vertices': self.fsm_state.pop() self.struct_state.pop() self.write_to = None - elif name == "VoxelIndicesIJK": + elif name == 'VoxelIndicesIJK': self.write_to = None - elif name == "Volume": + elif name == 'Volume': self.fsm_state.pop() self.struct_state.pop() - elif name == "TransformationMatrixVoxelIndicesIJKtoXYZ": + elif name == 
'TransformationMatrixVoxelIndicesIJKtoXYZ': self.fsm_state.pop() self.struct_state.pop() self.write_to = None - elif name == "BrainModel": + elif name == 'BrainModel': self.fsm_state.pop() self.struct_state.pop() - elif name == "VertexIndices": + elif name == 'VertexIndices': self.fsm_state.pop() self.struct_state.pop() self.write_to = None def CharacterDataHandler(self, data): - """ Collect character data chunks pending collation + """Collect character data chunks pending collation The parser breaks the data up into chunks of size depending on the buffer_size of the parser. A large bit of character data, with standard @@ -492,8 +497,7 @@ def CharacterDataHandler(self, data): self._char_blocks.append(data) def flush_chardata(self): - """ Collate and process collected character data - """ + """Collate and process collected character data""" if self._char_blocks is None: return # Just join the strings to get the data. Maybe there are some memory @@ -552,7 +556,7 @@ def flush_chardata(self): @property def pending_data(self): - " True if there is character data pending for processing " + """True if there is character data pending for processing""" return self._char_blocks is not None diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index 21cd83e80e..ecb6be272b 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -43,7 +43,9 @@ def get_parcels(): Parcel axis """ bml = list(get_brain_models()) - return axes.ParcelsAxis.from_brain_models([('mixed', bml[0] + bml[2]), ('volume', bml[1]), ('surface', bml[3])]) + return axes.ParcelsAxis.from_brain_models( + [('mixed', bml[0] + bml[2]), ('volume', bml[1]), ('surface', bml[3])] + ) def get_scalar(): @@ -79,7 +81,7 @@ def get_series(): yield axes.SeriesAxis(3, 10, 4) yield axes.SeriesAxis(8, 10, 3) yield axes.SeriesAxis(3, 2, 4) - yield axes.SeriesAxis(5, 10, 5, "HERTZ") + yield axes.SeriesAxis(5, 10, 5, 'HERTZ') def get_axes(): @@ -123,8 +125,9 @@ def test_brain_models(): assert (bml[4].voxel == -1).all() assert (bml[4].vertex == [2, 9, 14]).all() - for bm, label, is_surface in zip(bml, ['ThalamusRight', 'Other', 'cortex_left', 'Other'], - (False, False, True, True)): + for bm, label, is_surface in zip( + bml, ['ThalamusRight', 'Other', 'cortex_left', 'Other'], (False, False, True, True) + ): assert np.all(bm.surface_mask == ~bm.volume_mask) structures = list(bm.iter_structures()) assert len(structures) == 1 @@ -162,7 +165,7 @@ def test_brain_models(): bmt.volume_shape = (5, 3, 1) with pytest.raises(ValueError): - bmt.volume_shape = (5., 3, 1) + bmt.volume_shape = (5.0, 3, 1) with pytest.raises(ValueError): bmt.volume_shape = (5, 3, 1, 4) @@ -170,7 +173,9 @@ def test_brain_models(): bmt['thalamus_left'] # Test the constructor - bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + bm_vox = axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + ) assert np.all(bm_vox.name == ['CIFTI_STRUCTURE_THALAMUS_LEFT'] * 5) assert np.array_equal(bm_vox.vertex, np.full(5, -1)) assert np.array_equal(bm_vox.voxel, np.full((5, 3), 1)) @@ -179,30 +184,53 @@ def test_brain_models(): axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4)) with pytest.raises(ValueError): # no affine - axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), 
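
# Illustrative aside, not part of the patch: as the constructor cases around
# here exercise, a volume-based BrainModelAxis is only valid with *both* an
# affine and a volume_shape, while a surface-based one instead needs an
# nvertices entry for its structure; the unused coordinate is padded with -1.
# A hedged sketch of the two valid flavours:

from nibabel.cifti2 import cifti2_axes as axes
import numpy as np

vol = axes.BrainModelAxis('thalamus_left', voxel=np.zeros((5, 3), dtype=int),
                          affine=np.eye(4), volume_shape=(2, 3, 4))
surf = axes.BrainModelAxis('cortex_left', vertex=np.arange(5),
                           nvertices={'cortex_left': 20})
assert (vol.vertex == -1).all() and (surf.voxel == -1).all()
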
dtype=int), volume_shape=(2, 3, 4) + ) with pytest.raises(ValueError): # incorrect name - axes.BrainModelAxis('random_name', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'random_name', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) with pytest.raises(ValueError): # negative voxel indices - axes.BrainModelAxis('thalamus_left', voxel=-np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'thalamus_left', + voxel=-np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) with pytest.raises(ValueError): # no voxels or vertices axes.BrainModelAxis('thalamus_left', affine=np.eye(4), volume_shape=(2, 3, 4)) with pytest.raises(ValueError): # incorrect voxel shape - axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 2), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'thalamus_left', + voxel=np.ones((5, 2), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) - bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + bm_vertex = axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20} + ) assert np.array_equal(bm_vertex.name, ['CIFTI_STRUCTURE_CORTEX_LEFT'] * 5) assert np.array_equal(bm_vertex.vertex, np.full(5, 1)) assert np.array_equal(bm_vertex.voxel, np.full((5, 3), -1)) with pytest.raises(ValueError): axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int)) with pytest.raises(ValueError): - axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_right': 20}) + axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_right': 20} + ) with pytest.raises(ValueError): - axes.BrainModelAxis('cortex_left', vertex=-np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + axes.BrainModelAxis( + 'cortex_left', vertex=-np.ones(5, dtype=int), nvertices={'cortex_left': 20} + ) # test from_mask errors with pytest.raises(ValueError): @@ -213,11 +241,12 @@ def test_brain_models(): axes.BrainModelAxis.from_mask(np.ones((5, 3))) # tests error in adding together or combining as ParcelsAxis - bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), - affine=np.eye(4), volume_shape=(2, 3, 4)) + bm_vox = axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + ) bm_vox + bm_vox - assert (bm_vertex + bm_vox)[:bm_vertex.size] == bm_vertex - assert (bm_vox + bm_vertex)[:bm_vox.size] == bm_vox + assert (bm_vertex + bm_vox)[: bm_vertex.size] == bm_vertex + assert (bm_vox + bm_vertex)[: bm_vox.size] == bm_vox for bm_added in (bm_vox + bm_vertex, bm_vertex + bm_vox): assert bm_added.nvertices == bm_vertex.nvertices assert np.all(bm_added.affine == bm_vox.affine) @@ -227,29 +256,39 @@ def test_brain_models(): with pytest.raises(Exception): bm_vox + get_label() - bm_other_shape = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), - affine=np.eye(4), volume_shape=(4, 3, 4)) + bm_other_shape = axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(4, 3, 4) + ) with pytest.raises(ValueError): bm_vox + bm_other_shape with pytest.raises(ValueError): axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_other_shape)]) - bm_other_affine = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), - 
affine=np.eye(4) * 2, volume_shape=(2, 3, 4)) + bm_other_affine = axes.BrainModelAxis( + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4) * 2, + volume_shape=(2, 3, 4), + ) with pytest.raises(ValueError): bm_vox + bm_other_affine with pytest.raises(ValueError): axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_other_affine)]) - bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) - bm_other_number = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 30}) + bm_vertex = axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20} + ) + bm_other_number = axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 30} + ) with pytest.raises(ValueError): bm_vertex + bm_other_number with pytest.raises(ValueError): axes.ParcelsAxis.from_brain_models([('a', bm_vertex), ('b', bm_other_number)]) # test equalities - bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), - affine=np.eye(4), volume_shape=(2, 3, 4)) + bm_vox = axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + ) bm_other = deepcopy(bm_vox) assert bm_vox == bm_other bm_other.voxel[1, 0] = 0 @@ -276,7 +315,9 @@ def test_brain_models(): bm_other.volume_shape = (10, 3, 4) assert bm_vox != bm_other - bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + bm_vertex = axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20} + ) bm_other = deepcopy(bm_vertex) assert bm_vertex == bm_other bm_other.voxel[1, 0] = 0 @@ -308,31 +349,31 @@ def test_parcels(): """ prc = get_parcels() assert isinstance(prc, axes.ParcelsAxis) - assert prc[0] == ('mixed', ) + prc['mixed'] + assert prc[0] == ('mixed',) + prc['mixed'] assert prc['mixed'][0].shape == (3, 3) assert len(prc['mixed'][1]) == 1 - assert prc['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3, ) + assert prc['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3,) - assert prc[1] == ('volume', ) + prc['volume'] + assert prc[1] == ('volume',) + prc['volume'] assert prc['volume'][0].shape == (4, 3) assert len(prc['volume'][1]) == 0 - assert prc[2] == ('surface', ) + prc['surface'] + assert prc[2] == ('surface',) + prc['surface'] assert prc['surface'][0].shape == (0, 3) assert len(prc['surface'][1]) == 1 - assert prc['surface'][1]['CIFTI_STRUCTURE_OTHER'].shape == (4, ) + assert prc['surface'][1]['CIFTI_STRUCTURE_OTHER'].shape == (4,) prc2 = prc + prc assert len(prc2) == 6 assert (prc2.affine == prc.affine).all() - assert (prc2.nvertices == prc.nvertices) - assert (prc2.volume_shape == prc.volume_shape) + assert prc2.nvertices == prc.nvertices + assert prc2.volume_shape == prc.volume_shape assert prc2[:3] == prc assert prc2[3:] == prc assert prc2[3:]['mixed'][0].shape == (3, 3) assert len(prc2[3:]['mixed'][1]) == 1 - assert prc2[3:]['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3, ) + assert prc2[3:]['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3,) with pytest.raises(IndexError): prc['non_existent'] @@ -351,7 +392,7 @@ def test_parcels(): prc.volume_shape = (5, 3, 1) with pytest.raises(ValueError): - prc.volume_shape = (5., 3, 1) + prc.volume_shape = (5.0, 3, 1) with pytest.raises(ValueError): prc.volume_shape = (5, 3, 1, 4) @@ -412,7 +453,7 @@ def test_parcels(): assert prc != prc_other prc_other = 
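
# Illustrative aside, not part of the patch: as the assertions above show,
# indexing a ParcelsAxis by parcel name yields a (voxels, vertices) pair,
# while an integer index prepends the parcel name.  A hedged sketch:

from nibabel.cifti2 import cifti2_axes as axes
import numpy as np

bm = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int),
                         affine=np.eye(4), volume_shape=(2, 3, 4))
prc = axes.ParcelsAxis.from_brain_models([('thal', bm)])
voxels, vertices = prc['thal']
assert voxels.shape == (5, 3) and len(vertices) == 0
assert prc[0] == ('thal',) + prc['thal']
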
deepcopy(prc) - prc_other.vertices[0]['CIFTI_STRUCTURE_CORTEX_LEFT'] = np.ones((8, ), dtype='i4') + prc_other.vertices[0]['CIFTI_STRUCTURE_CORTEX_LEFT'] = np.ones((8,), dtype='i4') assert prc != prc_other prc_other = deepcopy(prc) @@ -425,20 +466,20 @@ def test_parcels(): # test direct initialisation axes.ParcelsAxis( - voxels=[np.ones((3, 2), dtype=int)], - vertices=[{}], - name=['single_voxel'], - affine=np.eye(4), - volume_shape=(2, 3, 4), + voxels=[np.ones((3, 2), dtype=int)], + vertices=[{}], + name=['single_voxel'], + affine=np.eye(4), + volume_shape=(2, 3, 4), ) with pytest.raises(ValueError): axes.ParcelsAxis( - voxels=[np.ones((3, 2), dtype=int)], - vertices=[{}], - name=[['single_voxel']], # wrong shape name array - affine=np.eye(4), - volume_shape=(2, 3, 4), + voxels=[np.ones((3, 2), dtype=int)], + vertices=[{}], + name=[['single_voxel']], # wrong shape name array + affine=np.eye(4), + volume_shape=(2, 3, 4), ) @@ -609,10 +650,10 @@ def test_series(): assert sr == sr[:] for key, value in ( - ('start', 20), - ('step', 7), - ('size', 14), - ('unit', 'HERTZ'), + ('start', 20), + ('step', 7), + ('size', 14), + ('unit', 'HERTZ'), ): sr_other = deepcopy(sr) assert sr == sr_other @@ -638,11 +679,10 @@ def test_common_interface(): assert axis1 == axis2 concatenated = axis1 + axis2 assert axis1 != concatenated - assert axis1 == concatenated[:axis1.size] + assert axis1 == concatenated[: axis1.size] if isinstance(axis1, axes.SeriesAxis): - assert axis2 != concatenated[axis1.size:] + assert axis2 != concatenated[axis1.size :] else: - assert axis2 == concatenated[axis1.size:] + assert axis2 == concatenated[axis1.size :] assert len(axis1) == axis1.size - diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index b04d1db585..be10f8b0e0 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -1,4 +1,4 @@ -""" Testing CIFTI-2 objects +"""Testing CIFTI-2 objects """ import collections from xml.etree import ElementTree @@ -81,7 +81,10 @@ def test_cifti2_metadata(): with pytest.raises(KeyError): md.difference_update({'a': 'aval', 'd': 'dval'}) - assert md.to_xml().decode('utf-8') == 'bbval' + assert ( + md.to_xml().decode('utf-8') + == 'bbval' + ) def test__float_01(): @@ -108,7 +111,6 @@ def test_cifti2_labeltable(): lt.to_xml() with pytest.raises(ci.Cifti2HeaderError): lt._to_xml_element() - label = ci.Cifti2Label(label='Test', key=0) lt[0] = label @@ -132,9 +134,9 @@ def test_cifti2_labeltable(): with pytest.raises(ValueError): lt[0] = test_tuple[:-1] - + with pytest.raises(ValueError): - lt[0] = ('foo', 1.1, 0, 0, 1) + lt[0] = ('foo', 1.1, 0, 0, 1) with pytest.raises(ValueError): lt[0] = ('foo', 1.0, -1, 0, 1) @@ -143,14 +145,15 @@ def test_cifti2_labeltable(): lt[0] = ('foo', 1.0, 0, -0.1, 1) - def test_cifti2_label(): lb = ci.Cifti2Label() lb.label = 'Test' lb.key = 0 assert lb.rgba == (0, 0, 0, 0) - assert compare_xml_leaf(lb.to_xml().decode('utf-8'), - "") + assert compare_xml_leaf( + lb.to_xml().decode('utf-8'), + "", + ) lb.red = 0 lb.green = 0.1 @@ -158,8 +161,10 @@ def test_cifti2_label(): lb.alpha = 0.3 assert lb.rgba == (0, 0.1, 0.2, 0.3) - assert compare_xml_leaf(lb.to_xml().decode('utf-8'), - "") + assert compare_xml_leaf( + lb.to_xml().decode('utf-8'), + "", + ) lb.red = 10 with pytest.raises(ci.Cifti2HeaderError): @@ -176,20 +181,25 @@ def test_cifti2_parcel(): pl = ci.Cifti2Parcel() with pytest.raises(ci.Cifti2HeaderError): pl.to_xml() - + with pytest.raises(TypeError): pl.append_cifti_vertices(None) - + with 
pytest.raises(ValueError): ci.Cifti2Parcel(vertices=[1, 2, 3]) - pl = ci.Cifti2Parcel(name='region', - voxel_indices_ijk=ci.Cifti2VoxelIndicesIJK([[1, 2, 3]]), - vertices=[ci.Cifti2Vertices([0, 1, 2])]) + pl = ci.Cifti2Parcel( + name='region', + voxel_indices_ijk=ci.Cifti2VoxelIndicesIJK([[1, 2, 3]]), + vertices=[ci.Cifti2Vertices([0, 1, 2])], + ) pl.pop_cifti2_vertices(0) assert len(pl.vertices) == 0 - assert pl.to_xml().decode('utf-8') == '1 2 3' + assert ( + pl.to_xml().decode('utf-8') + == '1 2 3' + ) def test_cifti2_vertices(): @@ -209,7 +219,10 @@ def test_cifti2_vertices(): with pytest.raises(ValueError): vs.insert(1, 'a') - assert vs.to_xml().decode('utf-8') == '0 1 2' + assert ( + vs.to_xml().decode('utf-8') + == '0 1 2' + ) vs[0] = 10 assert vs[0] == 10 @@ -244,7 +257,7 @@ def test_cifti2_vertexindices(): vi.extend(np.array([0, 1, 2])) assert len(vi) == 3 assert vi.to_xml().decode('utf-8') == '0 1 2' - + with pytest.raises(ValueError): vi[0] = 'a' @@ -296,17 +309,17 @@ def test_cifti2_voxelindicesijk(): assert vi[0, 1] == 10 vi[0, 1] = 1 - #test for vi[:, 0] and other slices + # test for vi[:, 0] and other slices with pytest.raises(NotImplementedError): vi[:, 0] with pytest.raises(NotImplementedError): vi[:, 0] = 0 with pytest.raises(NotImplementedError): # Don't know how to use remove with slice - del vi[:, 0] + del vi[:, 0] with pytest.raises(ValueError): vi[0, 0, 0] - + with pytest.raises(ValueError): vi[0, 0, 0] = 0 @@ -328,11 +341,10 @@ def test_matrixindicesmap(): assert mim.volume is None mim.extend((volume, parcel)) - assert mim.volume == volume with pytest.raises(ci.Cifti2HeaderError): mim.insert(0, volume) - + with pytest.raises(ci.Cifti2HeaderError): mim[1] = volume @@ -361,7 +373,7 @@ def test_matrix(): with pytest.raises(TypeError): m[0] = ci.Cifti2Parcel() - + with pytest.raises(TypeError): m.insert(0, ci.Cifti2Parcel()) @@ -382,7 +394,7 @@ def test_matrix(): assert h.number_of_mapped_indices == 1 with pytest.raises(ci.Cifti2HeaderError): m.insert(0, mim_0) - + with pytest.raises(ci.Cifti2HeaderError): m.insert(0, mim_01) @@ -400,23 +412,24 @@ def test_matrix(): def test_underscoring(): # Pairs taken from inflection tests # https://github.com/jpvanhal/inflection/blob/663982e/test_inflection.py#L113-L125 - pairs = (("Product", "product"), - ("SpecialGuest", "special_guest"), - ("ApplicationController", "application_controller"), - ("Area51Controller", "area51_controller"), - ("HTMLTidy", "html_tidy"), - ("HTMLTidyGenerator", "html_tidy_generator"), - ("FreeBSD", "free_bsd"), - ("HTML", "html"), - ) + pairs = ( + ('Product', 'product'), + ('SpecialGuest', 'special_guest'), + ('ApplicationController', 'application_controller'), + ('Area51Controller', 'area51_controller'), + ('HTMLTidy', 'html_tidy'), + ('HTMLTidyGenerator', 'html_tidy_generator'), + ('FreeBSD', 'free_bsd'), + ('HTML', 'html'), + ) for camel, underscored in pairs: assert ci.cifti2._underscore(camel) == underscored class TestCifti2ImageAPI(_TDA, SerializeMixin, DtypeOverrideMixin): - """ Basic validation for Cifti2Image instances - """ + """Basic validation for Cifti2Image instances""" + # A callable returning an image from ``image_maker(data, header)`` image_maker = ci.Cifti2Image # A callable returning a header from ``header_maker()`` @@ -425,14 +438,22 @@ class TestCifti2ImageAPI(_TDA, SerializeMixin, DtypeOverrideMixin): ni_header_maker = Nifti2Header example_shapes = ((2,), (2, 3), (2, 3, 4)) standard_extension = '.nii' - storable_dtypes = (np.int8, np.uint8, np.int16, np.uint16, np.int32, 
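
# Illustrative aside, not part of the patch: the _underscore pairs tested
# above follow the inflection package's camel-to-snake rule.  A hedged
# re-implementation that reproduces those pairs:

import re

def underscore(word):
    word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
    word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
    return word.replace('-', '_').lower()

assert underscore('SpecialGuest') == 'special_guest'
assert underscore('HTMLTidyGenerator') == 'html_tidy_generator'
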
np.uint32, - np.int64, np.uint64, np.float32, np.float64) + storable_dtypes = ( + np.int8, + np.uint8, + np.int16, + np.uint16, + np.int32, + np.uint32, + np.int64, + np.uint64, + np.float32, + np.float64, + ) def make_imaker(self, arr, header=None, ni_header=None): for idx, sz in enumerate(arr.shape): maps = [ci.Cifti2NamedMap(str(value)) for value in range(sz)] - mim = ci.Cifti2MatrixIndicesMap( - (idx, ), 'CIFTI_INDEX_TYPE_SCALARS', maps=maps - ) + mim = ci.Cifti2MatrixIndicesMap((idx,), 'CIFTI_INDEX_TYPE_SCALARS', maps=maps) header.matrix.append(mim) return lambda: self.image_maker(arr.copy(), header, ni_header) diff --git a/nibabel/cifti2/tests/test_cifti2io_axes.py b/nibabel/cifti2/tests/test_cifti2io_axes.py index fb5a485d98..756b0f6c9f 100644 --- a/nibabel/cifti2/tests/test_cifti2io_axes.py +++ b/nibabel/cifti2/tests/test_cifti2io_axes.py @@ -7,28 +7,68 @@ test_directory = os.path.join(get_nibabel_data(), 'nitest-cifti2') -hcp_labels = ['CortexLeft', 'CortexRight', 'AccumbensLeft', 'AccumbensRight', 'AmygdalaLeft', 'AmygdalaRight', - 'brain_stem', 'CaudateLeft', 'CaudateRight', 'CerebellumLeft', 'CerebellumRight', - 'Diencephalon_ventral_left', 'Diencephalon_ventral_right', 'HippocampusLeft', 'HippocampusRight', - 'PallidumLeft', 'PallidumRight', 'PutamenLeft', 'PutamenRight', 'ThalamusLeft', 'ThalamusRight'] - -hcp_n_elements = [29696, 29716, 135, 140, 315, 332, 3472, 728, 755, 8709, 9144, 706, - 712, 764, 795, 297, 260, 1060, 1010, 1288, 1248] - -hcp_affine = np.array([[ -2., 0., 0., 90.], - [ 0., 2., 0., -126.], - [ 0., 0., 2., -72.], - [ 0., 0., 0., 1.]]) +hcp_labels = [ + 'CortexLeft', + 'CortexRight', + 'AccumbensLeft', + 'AccumbensRight', + 'AmygdalaLeft', + 'AmygdalaRight', + 'brain_stem', + 'CaudateLeft', + 'CaudateRight', + 'CerebellumLeft', + 'CerebellumRight', + 'Diencephalon_ventral_left', + 'Diencephalon_ventral_right', + 'HippocampusLeft', + 'HippocampusRight', + 'PallidumLeft', + 'PallidumRight', + 'PutamenLeft', + 'PutamenRight', + 'ThalamusLeft', + 'ThalamusRight', +] + +hcp_n_elements = [ + 29696, + 29716, + 135, + 140, + 315, + 332, + 3472, + 728, + 755, + 8709, + 9144, + 706, + 712, + 764, + 795, + 297, + 260, + 1060, + 1010, + 1288, + 1248, +] + +hcp_affine = np.array( + [[-2.0, 0.0, 0.0, 90.0], [0.0, 2.0, 0.0, -126.0], [0.0, 0.0, 2.0, -72.0], [0.0, 0.0, 0.0, 1.0]] +) def check_hcp_grayordinates(brain_model): - """Checks that a BrainModelAxis matches the expected 32k HCP grayordinates - """ + """Checks that a BrainModelAxis matches the expected 32k HCP grayordinates""" assert isinstance(brain_model, cifti2_axes.BrainModelAxis) structures = list(brain_model.iter_structures()) assert len(structures) == len(hcp_labels) idx_start = 0 - for idx, (name, _, bm), label, nel in zip(range(len(structures)), structures, hcp_labels, hcp_n_elements): + for idx, (name, _, bm), label, nel in zip( + range(len(structures)), structures, hcp_labels, hcp_n_elements + ): if idx < 2: assert name in bm.nvertices.keys() assert (bm.voxel == -1).all() @@ -42,9 +82,9 @@ def check_hcp_grayordinates(brain_model): assert bm.volume_shape == (91, 109, 91) assert name == cifti2_axes.BrainModelAxis.to_cifti_brain_structure_name(label) assert len(bm) == nel - assert (bm.name == brain_model.name[idx_start:idx_start + nel]).all() - assert (bm.voxel == brain_model.voxel[idx_start:idx_start + nel]).all() - assert (bm.vertex == brain_model.vertex[idx_start:idx_start + nel]).all() + assert (bm.name == brain_model.name[idx_start : idx_start + nel]).all() + assert (bm.voxel == 
brain_model.voxel[idx_start : idx_start + nel]).all() + assert (bm.vertex == brain_model.vertex[idx_start : idx_start + nel]).all() idx_start += nel assert idx_start == len(brain_model) @@ -60,8 +100,7 @@ def check_hcp_grayordinates(brain_model): def check_Conte69(brain_model): - """Checks that the BrainModelAxis matches the expected Conte69 surface coordinates - """ + """Checks that the BrainModelAxis matches the expected Conte69 surface coordinates""" assert isinstance(brain_model, cifti2_axes.BrainModelAxis) structures = list(brain_model.iter_structures()) assert len(structures) == 2 @@ -96,7 +135,7 @@ def check_rewrite(arr, axes, extension='.nii'): arr2 = img.get_fdata() assert np.allclose(arr, arr2) for idx in range(len(img.shape)): - assert (axes[idx] == img.header.get_axis(idx)) + assert axes[idx] == img.header.get_axis(idx) return img @@ -117,21 +156,27 @@ def test_read_ones(): @needs_nibabel_data('nitest-cifti2') def test_read_conte69_dscalar(): - img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii')) + img = nib.load( + os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii') + ) arr = img.get_fdata() axes = [img.header.get_axis(dim) for dim in range(2)] assert isinstance(axes[0], cifti2_axes.ScalarAxis) assert len(axes[0]) == 2 assert axes[0].name[0] == 'MyelinMap_BC_decurv' assert axes[0].name[1] == 'corrThickness' - assert axes[0].meta[0] == {'PaletteColorMapping': '\n MODE_AUTO_SCALE_PERCENTAGE\n 98.000000 2.000000 2.000000 98.000000\n -100.000000 0.000000 0.000000 100.000000\n ROY-BIG-BL\n true\n true\n false\n true\n THRESHOLD_TEST_SHOW_OUTSIDE\n THRESHOLD_TYPE_OFF\n false\n -1.000000 1.000000\n -1.000000 1.000000\n -1.000000 1.000000\n \n PALETTE_THRESHOLD_RANGE_MODE_MAP\n'} + assert axes[0].meta[0] == { + 'PaletteColorMapping': '\n MODE_AUTO_SCALE_PERCENTAGE\n 98.000000 2.000000 2.000000 98.000000\n -100.000000 0.000000 0.000000 100.000000\n ROY-BIG-BL\n true\n true\n false\n true\n THRESHOLD_TEST_SHOW_OUTSIDE\n THRESHOLD_TYPE_OFF\n false\n -1.000000 1.000000\n -1.000000 1.000000\n -1.000000 1.000000\n \n PALETTE_THRESHOLD_RANGE_MODE_MAP\n' + } check_Conte69(axes[1]) check_rewrite(arr, axes) @needs_nibabel_data('nitest-cifti2') def test_read_conte69_dtseries(): - img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii')) + img = nib.load( + os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii') + ) arr = img.get_fdata() axes = [img.header.get_axis(dim) for dim in range(2)] assert isinstance(axes[0], cifti2_axes.SeriesAxis) @@ -146,13 +191,21 @@ def test_read_conte69_dtseries(): @needs_nibabel_data('nitest-cifti2') def test_read_conte69_dlabel(): - img = nib.load(os.path.join(test_directory, 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii')) + img = nib.load( + os.path.join(test_directory, 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii') + ) arr = img.get_fdata() axes = [img.header.get_axis(dim) for dim in range(2)] assert isinstance(axes[0], cifti2_axes.LabelAxis) assert len(axes[0]) == 3 - assert (axes[0].name == ['Composite Parcellation-lh (FRB08_OFP03_retinotopic)', - 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', 'MEDIAL WALL lh (fs_LR)']).all() + assert ( + axes[0].name + == [ + 'Composite Parcellation-lh (FRB08_OFP03_retinotopic)', + 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', + 'MEDIAL WALL lh (fs_LR)', + ] + ).all() assert axes[0].label[1][70] == ('19_B05', (1.0, 0.867, 0.467, 1.0)) assert (axes[0].meta 
== [{}] * 3).all() check_Conte69(axes[1]) @@ -161,7 +214,9 @@ def test_read_conte69_dlabel(): @needs_nibabel_data('nitest-cifti2') def test_read_conte69_ptseries(): - img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii')) + img = nib.load( + os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii') + ) arr = img.get_fdata() axes = [img.header.get_axis(dim) for dim in range(2)] assert isinstance(axes[0], cifti2_axes.SeriesAxis) @@ -175,6 +230,6 @@ def test_read_conte69_ptseries(): voxels, vertices = axes[1]['ER_FRB08'] assert voxels.shape == (0, 3) assert len(vertices) == 2 - assert vertices['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (206 // 2, ) - assert vertices['CIFTI_STRUCTURE_CORTEX_RIGHT'].shape == (206 // 2, ) + assert vertices['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (206 // 2,) + assert vertices['CIFTI_STRUCTURE_CORTEX_RIGHT'].shape == (206 // 2,) check_rewrite(arr, axes) diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 541ceaa30c..3497ec413f 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -29,21 +29,16 @@ CIFTI2_DATA = pjoin(get_nibabel_data(), 'nitest-cifti2') DATA_FILE1 = pjoin(CIFTI2_DATA, '') -DATA_FILE2 = pjoin(CIFTI2_DATA, - 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii') -DATA_FILE3 = pjoin(CIFTI2_DATA, - 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii') -DATA_FILE4 = pjoin(CIFTI2_DATA, - 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii') -DATA_FILE5 = pjoin(CIFTI2_DATA, - 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii') +DATA_FILE2 = pjoin(CIFTI2_DATA, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii') +DATA_FILE3 = pjoin(CIFTI2_DATA, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii') +DATA_FILE4 = pjoin(CIFTI2_DATA, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii') +DATA_FILE5 = pjoin(CIFTI2_DATA, 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii') DATA_FILE6 = pjoin(CIFTI2_DATA, 'ones.dscalar.nii') datafiles = [DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6] def test_space_separated_affine(): - img = ci.Cifti2Image.from_filename( - pjoin(NIBABEL_TEST_DATA, "row_major.dconn.nii")) + img = ci.Cifti2Image.from_filename(pjoin(NIBABEL_TEST_DATA, 'row_major.dconn.nii')) def test_read_nifti2(): @@ -92,12 +87,9 @@ def test_readwritedata(): img2 = ci.load('test.nii') assert len(img.header.matrix) == len(img2.header.matrix) # Order should be preserved in load/save - for mim1, mim2 in zip(img.header.matrix, - img2.header.matrix): - named_maps1 = [m_ for m_ in mim1 - if isinstance(m_, ci.Cifti2NamedMap)] - named_maps2 = [m_ for m_ in mim2 - if isinstance(m_, ci.Cifti2NamedMap)] + for mim1, mim2 in zip(img.header.matrix, img2.header.matrix): + named_maps1 = [m_ for m_ in mim1 if isinstance(m_, ci.Cifti2NamedMap)] + named_maps2 = [m_ for m_ in mim2 if isinstance(m_, ci.Cifti2NamedMap)] assert len(named_maps1) == len(named_maps2) for map1, map2 in zip(named_maps1, named_maps2): assert map1.map_name == map2.map_name @@ -118,12 +110,9 @@ def test_nibabel_readwritedata(): img2 = nib.load('test.nii') assert len(img.header.matrix) == len(img2.header.matrix) # Order should be preserved in load/save - for mim1, mim2 in zip(img.header.matrix, - img2.header.matrix): - named_maps1 = [m_ for m_ in mim1 - if isinstance(m_, ci.Cifti2NamedMap)] - named_maps2 = [m_ for m_ in mim2 - if isinstance(m_, ci.Cifti2NamedMap)] + for mim1, mim2 in 
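
# Illustrative aside, not part of the patch: the same load/save symmetry can
# be driven from the axes API.  A hedged sketch; it assumes the
# Cifti2Header.from_axes helper and writes a scratch file to the working
# directory:

import numpy as np
import nibabel.cifti2 as ci
from nibabel.cifti2 import cifti2_axes as axes

series = axes.SeriesAxis(start=0, step=1, size=4)
scalar = axes.ScalarAxis(['a', 'b'])
hdr = ci.Cifti2Header.from_axes((series, scalar))
img = ci.Cifti2Image(np.zeros((4, 2)), header=hdr)
ci.save(img, 'test.nii')
img2 = ci.load('test.nii')
assert img2.header.get_axis(0) == series
assert img2.header.get_axis(1) == scalar
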
zip(img.header.matrix, img2.header.matrix): + named_maps1 = [m_ for m_ in mim1 if isinstance(m_, ci.Cifti2NamedMap)] + named_maps2 = [m_ for m_ in mim2 if isinstance(m_, ci.Cifti2NamedMap)] assert len(named_maps1) == len(named_maps2) for map1, map2 in zip(named_maps1, named_maps2): assert map1.map_name == map2.map_name @@ -138,19 +127,20 @@ def test_nibabel_readwritedata(): def test_cifti2types(): """Check that we instantiate Cifti2 classes correctly, and that our test files exercise all classes""" - counter = {ci.Cifti2LabelTable: 0, - ci.Cifti2Label: 0, - ci.Cifti2NamedMap: 0, - ci.Cifti2Surface: 0, - ci.Cifti2VoxelIndicesIJK: 0, - ci.Cifti2Vertices: 0, - ci.Cifti2Parcel: 0, - ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ: 0, - ci.Cifti2Volume: 0, - ci.Cifti2VertexIndices: 0, - ci.Cifti2BrainModel: 0, - ci.Cifti2MatrixIndicesMap: 0, - } + counter = { + ci.Cifti2LabelTable: 0, + ci.Cifti2Label: 0, + ci.Cifti2NamedMap: 0, + ci.Cifti2Surface: 0, + ci.Cifti2VoxelIndicesIJK: 0, + ci.Cifti2Vertices: 0, + ci.Cifti2Parcel: 0, + ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ: 0, + ci.Cifti2Volume: 0, + ci.Cifti2VertexIndices: 0, + ci.Cifti2BrainModel: 0, + ci.Cifti2MatrixIndicesMap: 0, + } for name in datafiles: hdr = ci.load(name).header @@ -166,8 +156,7 @@ def test_cifti2types(): counter[ci.Cifti2BrainModel] += 1 if isinstance(map_.vertex_indices, ci.Cifti2VertexIndices): counter[ci.Cifti2VertexIndices] += 1 - if isinstance(map_.voxel_indices_ijk, - ci.Cifti2VoxelIndicesIJK): + if isinstance(map_.voxel_indices_ijk, ci.Cifti2VoxelIndicesIJK): counter[ci.Cifti2VoxelIndicesIJK] += 1 elif isinstance(map_, ci.Cifti2NamedMap): counter[ci.Cifti2NamedMap] += 1 @@ -179,8 +168,7 @@ def test_cifti2types(): counter[ci.Cifti2Label] += 1 elif isinstance(map_, ci.Cifti2Parcel): counter[ci.Cifti2Parcel] += 1 - if isinstance(map_.voxel_indices_ijk, - ci.Cifti2VoxelIndicesIJK): + if isinstance(map_.voxel_indices_ijk, ci.Cifti2VoxelIndicesIJK): counter[ci.Cifti2VoxelIndicesIJK] += 1 assert isinstance(map_.vertices, list) for vtcs in map_.vertices: @@ -190,18 +178,24 @@ def test_cifti2types(): counter[ci.Cifti2Surface] += 1 elif isinstance(map_, ci.Cifti2Volume): counter[ci.Cifti2Volume] += 1 - if isinstance(map_.transformation_matrix_voxel_indices_ijk_to_xyz, - ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ): + if isinstance( + map_.transformation_matrix_voxel_indices_ijk_to_xyz, + ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + ): counter[ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ] += 1 assert list(mim.named_maps) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2NamedMap)] assert list(mim.surfaces) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Surface)] assert list(mim.parcels) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Parcel)] - assert list(mim.brain_models) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2BrainModel)] - assert ([mim.volume] if mim.volume else []) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Volume)] + assert list(mim.brain_models) == [ + m_ for m_ in mim if isinstance(m_, ci.Cifti2BrainModel) + ] + assert ([mim.volume] if mim.volume else []) == [ + m_ for m_ in mim if isinstance(m_, ci.Cifti2Volume) + ] for klass, count in counter.items(): - assert count > 0, "No exercise of " + klass.__name__ + assert count > 0, 'No exercise of ' + klass.__name__ @needs_nibabel_data('nitest-cifti2') @@ -211,30 +205,32 @@ def test_read_geometry(): # For every brain model in ones.dscalar.nii defines: # brain structure name, number of grayordinates, first vertex or voxel, last 
vertex or voxel - expected_geometry = [('CIFTI_STRUCTURE_CORTEX_LEFT', 29696, 0, 32491), - ('CIFTI_STRUCTURE_CORTEX_RIGHT', 29716, 0, 32491), - ('CIFTI_STRUCTURE_ACCUMBENS_LEFT', 135, [49, 66, 28], [48, 72, 35]), - ('CIFTI_STRUCTURE_ACCUMBENS_RIGHT', 140, [40, 66, 29], [43, 66, 36]), - ('CIFTI_STRUCTURE_AMYGDALA_LEFT', 315, [55, 61, 21], [56, 58, 31]), - ('CIFTI_STRUCTURE_AMYGDALA_RIGHT', 332, [34, 62, 20], [36, 61, 31]), - ('CIFTI_STRUCTURE_BRAIN_STEM', 3472, [42, 41, 0], [46, 50, 36]), - ('CIFTI_STRUCTURE_CAUDATE_LEFT', 728, [50, 72, 32], [53, 60, 49]), - ('CIFTI_STRUCTURE_CAUDATE_RIGHT', 755, [40, 68, 33], [37, 62, 49]), - ('CIFTI_STRUCTURE_CEREBELLUM_LEFT', 8709, [49, 35, 4], [46, 37, 37]), - ('CIFTI_STRUCTURE_CEREBELLUM_RIGHT', 9144, [38, 35, 4], [44, 38, 36]), - ('CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT', 706, [52, 53, 26], [56, 49, 35]), - ('CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT', 712, [39, 54, 26], [35, 49, 36]), - ('CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT', 764, [55, 60, 21], [54, 44, 39]), - ('CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT', 795, [33, 60, 21], [38, 45, 39]), - ('CIFTI_STRUCTURE_PALLIDUM_LEFT', 297, [56, 59, 32], [55, 61, 39]), - ('CIFTI_STRUCTURE_PALLIDUM_RIGHT', 260, [36, 62, 32], [35, 62, 39]), - ('CIFTI_STRUCTURE_PUTAMEN_LEFT', 1060, [51, 66, 28], [58, 64, 43]), - ('CIFTI_STRUCTURE_PUTAMEN_RIGHT', 1010, [34, 66, 29], [31, 62, 43]), - ('CIFTI_STRUCTURE_THALAMUS_LEFT', 1288, [55, 47, 33], [52, 53, 46]), - ('CIFTI_STRUCTURE_THALAMUS_RIGHT', 1248, [32, 47, 34], [38, 55, 46])] + expected_geometry = [ + ('CIFTI_STRUCTURE_CORTEX_LEFT', 29696, 0, 32491), + ('CIFTI_STRUCTURE_CORTEX_RIGHT', 29716, 0, 32491), + ('CIFTI_STRUCTURE_ACCUMBENS_LEFT', 135, [49, 66, 28], [48, 72, 35]), + ('CIFTI_STRUCTURE_ACCUMBENS_RIGHT', 140, [40, 66, 29], [43, 66, 36]), + ('CIFTI_STRUCTURE_AMYGDALA_LEFT', 315, [55, 61, 21], [56, 58, 31]), + ('CIFTI_STRUCTURE_AMYGDALA_RIGHT', 332, [34, 62, 20], [36, 61, 31]), + ('CIFTI_STRUCTURE_BRAIN_STEM', 3472, [42, 41, 0], [46, 50, 36]), + ('CIFTI_STRUCTURE_CAUDATE_LEFT', 728, [50, 72, 32], [53, 60, 49]), + ('CIFTI_STRUCTURE_CAUDATE_RIGHT', 755, [40, 68, 33], [37, 62, 49]), + ('CIFTI_STRUCTURE_CEREBELLUM_LEFT', 8709, [49, 35, 4], [46, 37, 37]), + ('CIFTI_STRUCTURE_CEREBELLUM_RIGHT', 9144, [38, 35, 4], [44, 38, 36]), + ('CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT', 706, [52, 53, 26], [56, 49, 35]), + ('CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT', 712, [39, 54, 26], [35, 49, 36]), + ('CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT', 764, [55, 60, 21], [54, 44, 39]), + ('CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT', 795, [33, 60, 21], [38, 45, 39]), + ('CIFTI_STRUCTURE_PALLIDUM_LEFT', 297, [56, 59, 32], [55, 61, 39]), + ('CIFTI_STRUCTURE_PALLIDUM_RIGHT', 260, [36, 62, 32], [35, 62, 39]), + ('CIFTI_STRUCTURE_PUTAMEN_LEFT', 1060, [51, 66, 28], [58, 64, 43]), + ('CIFTI_STRUCTURE_PUTAMEN_RIGHT', 1010, [34, 66, 29], [31, 62, 43]), + ('CIFTI_STRUCTURE_THALAMUS_LEFT', 1288, [55, 47, 33], [52, 53, 46]), + ('CIFTI_STRUCTURE_THALAMUS_RIGHT', 1248, [32, 47, 34], [38, 55, 46]), + ] current_index = 0 for from_file, expected in zip(geometry_mapping.brain_models, expected_geometry): - assert from_file.model_type in ("CIFTI_MODEL_TYPE_SURFACE", "CIFTI_MODEL_TYPE_VOXELS") + assert from_file.model_type in ('CIFTI_MODEL_TYPE_SURFACE', 'CIFTI_MODEL_TYPE_VOXELS') assert from_file.brain_structure == expected[0] assert from_file.index_offset == current_index assert from_file.index_count == expected[1] @@ -254,13 +250,12 @@ def test_read_geometry(): assert from_file.voxel_indices_ijk[-1] == expected[3] assert 
current_index == img.shape[1] - expected_affine = [[-2, 0, 0, 90], - [ 0, 2, 0, -126], - [ 0, 0, 2, -72], - [ 0, 0, 0, 1]] + expected_affine = [[-2, 0, 0, 90], [0, 2, 0, -126], [0, 0, 2, -72], [0, 0, 0, 1]] expected_dimensions = (91, 109, 91) - assert (geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix == - expected_affine).all() + assert ( + geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix + == expected_affine + ).all() assert geometry_mapping.volume.volume_dimensions == expected_dimensions @@ -269,60 +264,62 @@ def test_read_parcels(): img = ci.Cifti2Image.from_filename(DATA_FILE4) parcel_mapping = img.header.matrix.get_index_map(1) - expected_parcels = [('MEDIAL.WALL', ((719, 20, 28550), (810, 21, 28631))), - ('BA2_FRB08', ((516, 6757, 17888), (461, 6757, 17887))), - ('BA1_FRB08', ((211, 5029, 17974), (214, 3433, 17934))), - ('BA3b_FRB08', ((444, 3436, 18065), (397, 3436, 18065))), - ('BA4p_FRB08', ((344, 3445, 18164), (371, 3443, 18175))), - ('BA3a_FRB08', ((290, 3441, 18140), (289, 3440, 18140))), - ('BA4a_FRB08', ((471, 3446, 18181), (455, 3446, 19759))), - ('BA6_FRB08', ((1457, 2, 30951), (1400, 2, 30951))), - ('BA17_V1_FRB08', ((629, 23155, 25785), (635, 23155, 25759))), - ('BA45_FRB08', ((245, 10100, 18774), (214, 10103, 18907))), - ('BA44_FRB08', ((226, 10118, 19240), (273, 10119, 19270))), - ('hOc5_MT_FRB08', ((104, 15019, 23329), (80, 15023, 23376))), - ('BA18_V2_FRB08', ((702, 95, 25902), (651, 98, 25903))), - ('V3A_SHM07', ((82, 4, 25050), (82, 4, 25050))), - ('V3B_SHM07', ((121, 13398, 23303), (121, 13398, 23303))), - ('LO1_KPO10', ((54, 15007, 23543), (54, 15007, 23543))), - ('LO2_KPO10', ((79, 15013, 23636), (79, 15013, 23636))), - ('PITd_KPO10', ((53, 15018, 23769), (65, 15018, 23769))), - ('PITv_KPO10', ((72, 23480, 23974), (72, 23480, 23974))), - ('OP1_BSW08', ((470, 8421, 18790), (470, 8421, 18790))), - ('OP2_BSW08', ((67, 10, 31060), (67, 10, 31060))), - ('OP3_BSW08', ((119, 10137, 18652), (119, 10137, 18652))), - ('OP4_BSW08', ((191, 16613, 19429), (192, 16613, 19429))), - ('IPS1_SHM07', ((54, 11775, 14496), (54, 11775, 14496))), - ('IPS2_SHM07', ((71, 11771, 14587), (71, 11771, 14587))), - ('IPS3_SHM07', ((114, 11764, 14783), (114, 11764, 14783))), - ('IPS4_SHM07', ((101, 11891, 12653), (101, 11891, 12653))), - ('V7_SHM07', ((140, 11779, 14002), (140, 11779, 14002))), - ('V4v_SHM07', ((81, 23815, 24557), (90, 23815, 24557))), - ('V3d_KPO10', ((90, 23143, 25192), (115, 23143, 25192))), - ('14c_OFP03', ((22, 19851, 21311), (22, 19851, 21311))), - ('13a_OFP03', ((20, 20963, 21154), (20, 20963, 21154))), - ('47s_OFP03', ((211, 10182, 20343), (211, 10182, 20343))), - ('14r_OFP03', ((54, 21187, 21324), (54, 21187, 21324))), - ('13m_OFP03', ((103, 20721, 21075), (103, 20721, 21075))), - ('13l_OFP03', ((101, 20466, 20789), (101, 20466, 20789))), - ('32pl_OFP03', ((14, 19847, 21409), (14, 19847, 21409))), - ('25_OFP03', ((8, 19844, 27750), (8, 19844, 27750))), - ('47m_OFP03', ((200, 10174, 20522), (200, 10174, 20522))), - ('47l_OFP03', ((142, 10164, 19969), (160, 10164, 19969))), - ('Iai_OFP03', ((153, 10188, 20199), (153, 10188, 20199))), - ('10r_OFP03', ((138, 19811, 28267), (138, 19811, 28267))), - ('11m_OFP03', ((92, 20850, 21165), (92, 20850, 21165))), - ('11l_OFP03', ((200, 20275, 21029), (200, 20275, 21029))), - ('47r_OFP03', ((259, 10094, 20535), (259, 10094, 20535))), - ('10m_OFP03', ((102, 19825, 21411), (102, 19825, 21411))), - ('Iam_OFP03', ((15, 20346, 20608), (15, 20346, 20608))), - ('Ial_OFP03', ((89, 
10194, 11128), (89, 10194, 11128))), - ('24_OFP03', ((39, 19830, 28279), (36, 19830, 28279))), - ('Iapm_OFP03', ((7, 20200, 20299), (7, 20200, 20299))), - ('10p_OFP03', ((480, 19780, 28640), (480, 19780, 28640))), - ('V6_PHG06', ((72, 12233, 12869), (72, 12233, 12869))), - ('ER_FRB08', ((103, 21514, 26470), (103, 21514, 26470))), - ('13b_OFP03', ((60, 21042, 21194), (71, 21040, 21216)))] + expected_parcels = [ + ('MEDIAL.WALL', ((719, 20, 28550), (810, 21, 28631))), + ('BA2_FRB08', ((516, 6757, 17888), (461, 6757, 17887))), + ('BA1_FRB08', ((211, 5029, 17974), (214, 3433, 17934))), + ('BA3b_FRB08', ((444, 3436, 18065), (397, 3436, 18065))), + ('BA4p_FRB08', ((344, 3445, 18164), (371, 3443, 18175))), + ('BA3a_FRB08', ((290, 3441, 18140), (289, 3440, 18140))), + ('BA4a_FRB08', ((471, 3446, 18181), (455, 3446, 19759))), + ('BA6_FRB08', ((1457, 2, 30951), (1400, 2, 30951))), + ('BA17_V1_FRB08', ((629, 23155, 25785), (635, 23155, 25759))), + ('BA45_FRB08', ((245, 10100, 18774), (214, 10103, 18907))), + ('BA44_FRB08', ((226, 10118, 19240), (273, 10119, 19270))), + ('hOc5_MT_FRB08', ((104, 15019, 23329), (80, 15023, 23376))), + ('BA18_V2_FRB08', ((702, 95, 25902), (651, 98, 25903))), + ('V3A_SHM07', ((82, 4, 25050), (82, 4, 25050))), + ('V3B_SHM07', ((121, 13398, 23303), (121, 13398, 23303))), + ('LO1_KPO10', ((54, 15007, 23543), (54, 15007, 23543))), + ('LO2_KPO10', ((79, 15013, 23636), (79, 15013, 23636))), + ('PITd_KPO10', ((53, 15018, 23769), (65, 15018, 23769))), + ('PITv_KPO10', ((72, 23480, 23974), (72, 23480, 23974))), + ('OP1_BSW08', ((470, 8421, 18790), (470, 8421, 18790))), + ('OP2_BSW08', ((67, 10, 31060), (67, 10, 31060))), + ('OP3_BSW08', ((119, 10137, 18652), (119, 10137, 18652))), + ('OP4_BSW08', ((191, 16613, 19429), (192, 16613, 19429))), + ('IPS1_SHM07', ((54, 11775, 14496), (54, 11775, 14496))), + ('IPS2_SHM07', ((71, 11771, 14587), (71, 11771, 14587))), + ('IPS3_SHM07', ((114, 11764, 14783), (114, 11764, 14783))), + ('IPS4_SHM07', ((101, 11891, 12653), (101, 11891, 12653))), + ('V7_SHM07', ((140, 11779, 14002), (140, 11779, 14002))), + ('V4v_SHM07', ((81, 23815, 24557), (90, 23815, 24557))), + ('V3d_KPO10', ((90, 23143, 25192), (115, 23143, 25192))), + ('14c_OFP03', ((22, 19851, 21311), (22, 19851, 21311))), + ('13a_OFP03', ((20, 20963, 21154), (20, 20963, 21154))), + ('47s_OFP03', ((211, 10182, 20343), (211, 10182, 20343))), + ('14r_OFP03', ((54, 21187, 21324), (54, 21187, 21324))), + ('13m_OFP03', ((103, 20721, 21075), (103, 20721, 21075))), + ('13l_OFP03', ((101, 20466, 20789), (101, 20466, 20789))), + ('32pl_OFP03', ((14, 19847, 21409), (14, 19847, 21409))), + ('25_OFP03', ((8, 19844, 27750), (8, 19844, 27750))), + ('47m_OFP03', ((200, 10174, 20522), (200, 10174, 20522))), + ('47l_OFP03', ((142, 10164, 19969), (160, 10164, 19969))), + ('Iai_OFP03', ((153, 10188, 20199), (153, 10188, 20199))), + ('10r_OFP03', ((138, 19811, 28267), (138, 19811, 28267))), + ('11m_OFP03', ((92, 20850, 21165), (92, 20850, 21165))), + ('11l_OFP03', ((200, 20275, 21029), (200, 20275, 21029))), + ('47r_OFP03', ((259, 10094, 20535), (259, 10094, 20535))), + ('10m_OFP03', ((102, 19825, 21411), (102, 19825, 21411))), + ('Iam_OFP03', ((15, 20346, 20608), (15, 20346, 20608))), + ('Ial_OFP03', ((89, 10194, 11128), (89, 10194, 11128))), + ('24_OFP03', ((39, 19830, 28279), (36, 19830, 28279))), + ('Iapm_OFP03', ((7, 20200, 20299), (7, 20200, 20299))), + ('10p_OFP03', ((480, 19780, 28640), (480, 19780, 28640))), + ('V6_PHG06', ((72, 12233, 12869), (72, 12233, 12869))), + ('ER_FRB08', ((103, 21514, 26470), 
(103, 21514, 26470))), + ('13b_OFP03', ((60, 21042, 21194), (71, 21040, 21216))), + ] assert img.shape[1] == len(expected_parcels) assert len(list(parcel_mapping.parcels)) == len(expected_parcels) @@ -330,8 +327,9 @@ def test_read_parcels(): for (name, expected_surfaces), parcel in zip(expected_parcels, parcel_mapping.parcels): assert parcel.name == name assert len(parcel.vertices) == 2 - for vertices, orientation, (length, first_element, last_element) in zip(parcel.vertices, ('LEFT', 'RIGHT'), - expected_surfaces): + for vertices, orientation, (length, first_element, last_element) in zip( + parcel.vertices, ('LEFT', 'RIGHT'), expected_surfaces + ): assert len(vertices) == length assert vertices[0] == first_element assert vertices[-1] == last_element @@ -355,19 +353,19 @@ def test_read_scalar(): print(expected_meta[0], scalar.metadata.data.keys()) for key, value in expected_meta: assert key in scalar.metadata.data.keys() - assert scalar.metadata[key][:len(value)] == value + assert scalar.metadata[key][: len(value)] == value - assert scalar.label_table is None, ".dscalar file should not define a label table" + assert scalar.label_table is None, '.dscalar file should not define a label table' @needs_nibabel_data('nitest-cifti2') def test_read_series(): img = ci.Cifti2Image.from_filename(DATA_FILE4) series_mapping = img.header.matrix.get_index_map(0) - assert series_mapping.series_start == 0. - assert series_mapping.series_step == 1. + assert series_mapping.series_start == 0.0 + assert series_mapping.series_step == 1.0 assert series_mapping.series_unit == 'SECOND' - assert series_mapping.series_exponent == 0. + assert series_mapping.series_exponent == 0.0 assert series_mapping.number_of_series_points == img.shape[0] @@ -376,25 +374,29 @@ def test_read_labels(): img = ci.Cifti2Image.from_filename(DATA_FILE5) label_mapping = img.header.matrix.get_index_map(0) - expected_names = ['Composite Parcellation-lh (FRB08_OFP03_retinotopic)', - 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', - 'MEDIAL WALL lh (fs_LR)'] + expected_names = [ + 'Composite Parcellation-lh (FRB08_OFP03_retinotopic)', + 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', + 'MEDIAL WALL lh (fs_LR)', + ] assert img.shape[0] == len(expected_names) assert len(list(label_mapping.named_maps)) == len(expected_names) - some_expected_labels = {0: ('???', (0.667, 0.667, 0.667, 0.0)), - 1: ('MEDIAL.WALL', (0.075, 0.075, 0.075, 1.0)), - 2: ('BA2_FRB08', (0.467, 0.459, 0.055, 1.0)), - 3: ('BA1_FRB08', (0.475, 0.722, 0.859, 1.0)), - 4: ('BA3b_FRB08', (0.855, 0.902, 0.286, 1.0)), - 5: ('BA4p_FRB08', (0.902, 0.573, 0.122, 1.0)), - 89: ('36_B05', (0.467, 0.0, 0.129, 1.0)), - 90: ('35_B05', (0.467, 0.067, 0.067, 1.0)), - 91: ('28_B05', (0.467, 0.337, 0.271, 1.0)), - 92: ('29_B05', (0.267, 0.0, 0.529, 1.0)), - 93: ('26_B05', (0.757, 0.2, 0.227, 1.0)), - 94: ('33_B05', (0.239, 0.082, 0.373, 1.0)), - 95: ('13b_OFP03', (1.0, 1.0, 0.0, 1.0))} + some_expected_labels = { + 0: ('???', (0.667, 0.667, 0.667, 0.0)), + 1: ('MEDIAL.WALL', (0.075, 0.075, 0.075, 1.0)), + 2: ('BA2_FRB08', (0.467, 0.459, 0.055, 1.0)), + 3: ('BA1_FRB08', (0.475, 0.722, 0.859, 1.0)), + 4: ('BA3b_FRB08', (0.855, 0.902, 0.286, 1.0)), + 5: ('BA4p_FRB08', (0.902, 0.573, 0.122, 1.0)), + 89: ('36_B05', (0.467, 0.0, 0.129, 1.0)), + 90: ('35_B05', (0.467, 0.067, 0.067, 1.0)), + 91: ('28_B05', (0.467, 0.337, 0.271, 1.0)), + 92: ('29_B05', (0.267, 0.0, 0.529, 1.0)), + 93: ('26_B05', (0.757, 0.2, 0.227, 1.0)), + 94: ('33_B05', (0.239, 0.082, 0.373, 1.0)), + 95: ('13b_OFP03', (1.0, 1.0, 0.0, 
1.0)), + } for named_map, name in zip(label_mapping.named_maps, expected_names): assert named_map.map_name == name @@ -440,9 +442,9 @@ def test_pixdim_log_checks(self): fhdr, message, raiser = self.log_chk(hdr, 35) assert fhdr['pixdim'][1] == 2 assert message == self._pixdim_message + '; setting to abs of pixdim values' - + pytest.raises(*raiser) - + hdr = HC() hdr['pixdim'][1:4] = 0 # No error or warning fhdr, message, raiser = self.log_chk(hdr, 0) diff --git a/nibabel/cifti2/tests/test_name.py b/nibabel/cifti2/tests/test_name.py index 6b53d46523..789de00b58 100644 --- a/nibabel/cifti2/tests/test_name.py +++ b/nibabel/cifti2/tests/test_name.py @@ -1,11 +1,38 @@ from nibabel.cifti2 import cifti2_axes -equivalents = [('CIFTI_STRUCTURE_CORTEX_LEFT', ('CortexLeft', 'LeftCortex', 'left_cortex', 'Left Cortex', - 'Cortex_Left', 'cortex left', 'CORTEX_LEFT', 'LEFT CORTEX', - ('cortex', 'left'), ('CORTEX', 'Left'), ('LEFT', 'coRTEX'))), - ('CIFTI_STRUCTURE_CORTEX', ('Cortex', 'CortexBOTH', 'Cortex_both', 'both cortex', - 'BOTH_CORTEX', 'cortex', 'CORTEX', ('cortex', ), - ('COrtex', 'Both'), ('both', 'cortex')))] +equivalents = [ + ( + 'CIFTI_STRUCTURE_CORTEX_LEFT', + ( + 'CortexLeft', + 'LeftCortex', + 'left_cortex', + 'Left Cortex', + 'Cortex_Left', + 'cortex left', + 'CORTEX_LEFT', + 'LEFT CORTEX', + ('cortex', 'left'), + ('CORTEX', 'Left'), + ('LEFT', 'coRTEX'), + ), + ), + ( + 'CIFTI_STRUCTURE_CORTEX', + ( + 'Cortex', + 'CortexBOTH', + 'Cortex_both', + 'both cortex', + 'BOTH_CORTEX', + 'cortex', + 'CORTEX', + ('cortex',), + ('COrtex', 'Both'), + ('both', 'cortex'), + ), + ), +] def test_name_conversion(): @@ -16,4 +43,4 @@ def test_name_conversion(): for base_name, input_names in equivalents: assert base_name == func(base_name) for name in input_names: - assert base_name == func(name) \ No newline at end of file + assert base_name == func(name) diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index a49ba79d52..15c6c110b9 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -14,53 +14,63 @@ import pytest from ...testing import ( - clear_and_catch_warnings, error_warnings, suppress_warnings, assert_array_equal) + clear_and_catch_warnings, + error_warnings, + suppress_warnings, + assert_array_equal, +) -affine = [[-1.5, 0, 0, 90], - [0, 1.5, 0, -85], - [0, 0, 1.5, -71], - [0, 0, 0, 1.]] +affine = [[-1.5, 0, 0, 90], [0, 1.5, 0, -85], [0, 0, 1.5, -71], [0, 0, 0, 1.0]] dimensions = (120, 83, 78) number_of_vertices = 30000 -brain_models = [('CIFTI_STRUCTURE_THALAMUS_LEFT', [[60, 60, 60], - [61, 59, 60], - [61, 60, 59], - [80, 90, 92]]), - ('CIFTI_STRUCTURE_CORTEX_LEFT', [0, 1000, 1301, 19972, 27312]), - ('CIFTI_STRUCTURE_CORTEX_RIGHT', [207]) - ] +brain_models = [ + ('CIFTI_STRUCTURE_THALAMUS_LEFT', [[60, 60, 60], [61, 59, 60], [61, 60, 59], [80, 90, 92]]), + ('CIFTI_STRUCTURE_CORTEX_LEFT', [0, 1000, 1301, 19972, 27312]), + ('CIFTI_STRUCTURE_CORTEX_RIGHT', [207]), +] def create_geometry_map(applies_to_matrix_dimension): voxels = ci.Cifti2VoxelIndicesIJK(brain_models[0][1]) - left_thalamus = ci.Cifti2BrainModel(index_offset=0, index_count=4, - model_type='CIFTI_MODEL_TYPE_VOXELS', - brain_structure=brain_models[0][0], - voxel_indices_ijk=voxels) + left_thalamus = ci.Cifti2BrainModel( + index_offset=0, + index_count=4, + model_type='CIFTI_MODEL_TYPE_VOXELS', + brain_structure=brain_models[0][0], + voxel_indices_ijk=voxels, + ) vertices = ci.Cifti2VertexIndices(np.array(brain_models[1][1])) - left_cortex = 
ci.Cifti2BrainModel(index_offset=4, index_count=5, - model_type='CIFTI_MODEL_TYPE_SURFACE', - brain_structure=brain_models[1][0], - vertex_indices=vertices) + left_cortex = ci.Cifti2BrainModel( + index_offset=4, + index_count=5, + model_type='CIFTI_MODEL_TYPE_SURFACE', + brain_structure=brain_models[1][0], + vertex_indices=vertices, + ) left_cortex.surface_number_of_vertices = number_of_vertices vertices = ci.Cifti2VertexIndices(np.array(brain_models[2][1])) - right_cortex = ci.Cifti2BrainModel(index_offset=9, index_count=1, - model_type='CIFTI_MODEL_TYPE_SURFACE', - brain_structure=brain_models[2][0], - vertex_indices=vertices) + right_cortex = ci.Cifti2BrainModel( + index_offset=9, + index_count=1, + model_type='CIFTI_MODEL_TYPE_SURFACE', + brain_structure=brain_models[2][0], + vertex_indices=vertices, + ) right_cortex.surface_number_of_vertices = number_of_vertices - volume = ci.Cifti2Volume(dimensions, - ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, - affine)) - return ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_BRAIN_MODELS', - maps=[left_thalamus, left_cortex, right_cortex, volume]) + volume = ci.Cifti2Volume( + dimensions, ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, affine) + ) + return ci.Cifti2MatrixIndicesMap( + applies_to_matrix_dimension, + 'CIFTI_INDEX_TYPE_BRAIN_MODELS', + maps=[left_thalamus, left_cortex, right_cortex, volume], + ) def check_geometry_map(mapping): @@ -96,25 +106,25 @@ def check_geometry_map(mapping): assert (mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix == affine).all() -parcels = [('volume_parcel', ([[60, 60, 60], - [61, 59, 60], - [61, 60, 59], - [80, 90, 92]], )), - ('surface_parcel', (('CIFTI_STRUCTURE_CORTEX_LEFT', - [0, 1000, 1301, 19972, 27312]), - ('CIFTI_STRUCTURE_CORTEX_RIGHT', - [0, 100, 381]))), - ('mixed_parcel', ([[71, 81, 39], - [53, 21, 91]], - ('CIFTI_STRUCTURE_CORTEX_LEFT', [71, 88, 999]))), - ('single_element', ([[71, 81, 39]], - ('CIFTI_STRUCTURE_CORTEX_LEFT', [40]))), - ] +parcels = [ + ('volume_parcel', ([[60, 60, 60], [61, 59, 60], [61, 60, 59], [80, 90, 92]],)), + ( + 'surface_parcel', + ( + ('CIFTI_STRUCTURE_CORTEX_LEFT', [0, 1000, 1301, 19972, 27312]), + ('CIFTI_STRUCTURE_CORTEX_RIGHT', [0, 100, 381]), + ), + ), + ( + 'mixed_parcel', + ([[71, 81, 39], [53, 21, 91]], ('CIFTI_STRUCTURE_CORTEX_LEFT', [71, 88, 999])), + ), + ('single_element', ([[71, 81, 39]], ('CIFTI_STRUCTURE_CORTEX_LEFT', [40]))), +] def create_parcel_map(applies_to_matrix_dimension): - mapping = ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_PARCELS') + mapping = ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, 'CIFTI_INDEX_TYPE_PARCELS') for name, elements in parcels: surfaces = [] volume = None @@ -125,10 +135,15 @@ def create_parcel_map(applies_to_matrix_dimension): volume = ci.Cifti2VoxelIndicesIJK(element) mapping.append(ci.Cifti2Parcel(name, volume, surfaces)) - mapping.extend([ci.Cifti2Surface(f'CIFTI_STRUCTURE_CORTEX_{orientation}', - number_of_vertices) for orientation in ['LEFT', 'RIGHT']]) - mapping.volume = ci.Cifti2Volume(dimensions, - ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, affine)) + mapping.extend( + [ + ci.Cifti2Surface(f'CIFTI_STRUCTURE_CORTEX_{orientation}', number_of_vertices) + for orientation in ['LEFT', 'RIGHT'] + ] + ) + mapping.volume = ci.Cifti2Volume( + dimensions, ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, affine) + ) return mapping @@ -155,16 +170,14 @@ def check_parcel_map(mapping): assert 
(mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix == affine).all() -scalars = [('first_name', {'meta_key': 'some_metadata'}), - ('another name', {})] +scalars = [('first_name', {'meta_key': 'some_metadata'}), ('another name', {})] def create_scalar_map(applies_to_matrix_dimension): - maps = [ci.Cifti2NamedMap(name, ci.Cifti2MetaData(meta)) - for name, meta in scalars] - return ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_SCALARS', - maps=maps) + maps = [ci.Cifti2NamedMap(name, ci.Cifti2MetaData(meta)) for name, meta in scalars] + return ci.Cifti2MatrixIndicesMap( + applies_to_matrix_dimension, 'CIFTI_INDEX_TYPE_SCALARS', maps=maps + ) def check_scalar_map(mapping): @@ -179,11 +192,14 @@ def check_scalar_map(mapping): assert named_map.metadata == expected[1] -labels = [('first_name', {'meta_key': 'some_metadata'}, - {0: ('label0', (0.1, 0.3, 0.2, 0.5)), - 1: ('new_label', (0.5, 0.3, 0.1, 0.4))}), - ('another name', {}, {0: ('???', (0, 0, 0, 0)), - 1: ('great region', (0.4, 0.1, 0.23, 0.15))})] +labels = [ + ( + 'first_name', + {'meta_key': 'some_metadata'}, + {0: ('label0', (0.1, 0.3, 0.2, 0.5)), 1: ('new_label', (0.5, 0.3, 0.1, 0.4))}, + ), + ('another name', {}, {0: ('???', (0, 0, 0, 0)), 1: ('great region', (0.4, 0.1, 0.23, 0.15))}), +] def create_label_map(applies_to_matrix_dimension): @@ -192,11 +208,10 @@ def create_label_map(applies_to_matrix_dimension): label_table = ci.Cifti2LabelTable() for key, (tag, rgba) in label.items(): label_table[key] = ci.Cifti2Label(key, tag, *rgba) - maps.append(ci.Cifti2NamedMap(name, ci.Cifti2MetaData(meta), - label_table)) - return ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_LABELS', - maps=maps) + maps.append(ci.Cifti2NamedMap(name, ci.Cifti2MetaData(meta), label_table)) + return ci.Cifti2MatrixIndicesMap( + applies_to_matrix_dimension, 'CIFTI_INDEX_TYPE_LABELS', maps=maps + ) def check_label_map(mapping): @@ -212,11 +227,15 @@ def check_label_map(mapping): def create_series_map(applies_to_matrix_dimension): - return ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_SERIES', - number_of_series_points=13, - series_exponent=-3, series_start=18.2, - series_step=10.5, series_unit='SECOND') + return ci.Cifti2MatrixIndicesMap( + applies_to_matrix_dimension, + 'CIFTI_INDEX_TYPE_SERIES', + number_of_series_points=13, + series_exponent=-3, + series_start=18.2, + series_step=10.5, + series_unit='SECOND', + ) def check_series_map(mapping): @@ -229,8 +248,8 @@ def check_series_map(mapping): def test_dtseries(): - series_map = create_series_map((0, )) - geometry_map = create_geometry_map((1, )) + series_map = create_series_map((0,)) + geometry_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((series_map, geometry_map)) hdr = ci.Cifti2Header(matrix) @@ -250,8 +269,8 @@ def test_dtseries(): def test_dscalar(): - scalar_map = create_scalar_map((0, )) - geometry_map = create_geometry_map((1, )) + scalar_map = create_scalar_map((0,)) + geometry_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((scalar_map, geometry_map)) hdr = ci.Cifti2Header(matrix) @@ -271,8 +290,8 @@ def test_dscalar(): def test_dlabel(): - label_map = create_label_map((0, )) - geometry_map = create_geometry_map((1, )) + label_map = create_label_map((0,)) + geometry_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((label_map, geometry_map)) hdr = ci.Cifti2Header(matrix) @@ -312,8 +331,8 @@ def test_dconn(): def 
test_ptseries(): - series_map = create_series_map((0, )) - parcel_map = create_parcel_map((1, )) + series_map = create_series_map((0,)) + parcel_map = create_parcel_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((series_map, parcel_map)) hdr = ci.Cifti2Header(matrix) @@ -333,8 +352,8 @@ def test_ptseries(): def test_pscalar(): - scalar_map = create_scalar_map((0, )) - parcel_map = create_parcel_map((1, )) + scalar_map = create_scalar_map((0,)) + parcel_map = create_parcel_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((scalar_map, parcel_map)) hdr = ci.Cifti2Header(matrix) @@ -354,8 +373,8 @@ def test_pscalar(): def test_pdconn(): - geometry_map = create_geometry_map((0, )) - parcel_map = create_parcel_map((1, )) + geometry_map = create_geometry_map((0,)) + parcel_map = create_parcel_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((geometry_map, parcel_map)) hdr = ci.Cifti2Header(matrix) @@ -375,8 +394,8 @@ def test_pdconn(): def test_dpconn(): - parcel_map = create_parcel_map((0, )) - geometry_map = create_geometry_map((1, )) + parcel_map = create_parcel_map((0,)) + geometry_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((parcel_map, geometry_map)) hdr = ci.Cifti2Header(matrix) @@ -396,8 +415,8 @@ def test_dpconn(): def test_plabel(): - label_map = create_label_map((0, )) - parcel_map = create_parcel_map((1, )) + label_map = create_label_map((0,)) + parcel_map = create_parcel_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((label_map, parcel_map)) hdr = ci.Cifti2Header(matrix) @@ -437,15 +456,14 @@ def test_pconn(): def test_pconnseries(): parcel_map = create_parcel_map((0, 1)) - series_map = create_series_map((2, )) + series_map = create_series_map((2,)) matrix = ci.Cifti2Matrix() matrix.extend((parcel_map, series_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 13) img = ci.Cifti2Image(data, hdr) - img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' - 'PARCELLATED_SERIES') + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' 'PARCELLATED_SERIES') with InTemporaryDirectory(): ci.save(img, 'test.pconnseries.nii') @@ -461,15 +479,14 @@ def test_pconnseries(): def test_pconnscalar(): parcel_map = create_parcel_map((0, 1)) - scalar_map = create_scalar_map((2, )) + scalar_map = create_scalar_map((2,)) matrix = ci.Cifti2Matrix() matrix.extend((parcel_map, scalar_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 2) img = ci.Cifti2Image(data, hdr) - img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' - 'PARCELLATED_SCALAR') + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' 'PARCELLATED_SCALAR') with InTemporaryDirectory(): ci.save(img, 'test.pconnscalar.nii') @@ -485,8 +502,8 @@ def test_pconnscalar(): def test_wrong_shape(): - scalar_map = create_scalar_map((0, )) - brain_model_map = create_geometry_map((1, )) + scalar_map = create_scalar_map((0,)) + brain_model_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((scalar_map, brain_model_map)) @@ -506,7 +523,6 @@ def test_wrong_shape(): ci.Cifti2Image(data, hdr) with suppress_warnings(): img = ci.Cifti2Image(data, hdr) - + with pytest.raises(ValueError): img.to_file_map() - diff --git a/nibabel/cmdline/conform.py b/nibabel/cmdline/conform.py index cfa86b6951..52c80b5263 100644 --- a/nibabel/cmdline/conform.py +++ b/nibabel/cmdline/conform.py @@ -22,19 +22,25 @@ def _get_parser(): """Return command-line argument parser.""" p = argparse.ArgumentParser(description=__doc__) - 
p.add_argument("infile", - help="Neuroimaging volume to conform.") - p.add_argument("outfile", - help="Name of output file.") - p.add_argument("--out-shape", nargs=3, default=(256, 256, 256), type=int, - help="Shape of the conformed output.") - p.add_argument("--voxel-size", nargs=3, default=(1, 1, 1), type=int, - help="Voxel size in millimeters of the conformed output.") - p.add_argument("--orientation", default="RAS", - help="Orientation of the conformed output.") - p.add_argument("-f", "--force", action="store_true", - help="Overwrite existing output files.") - p.add_argument("-V", "--version", action="version", version=f"{p.prog} {__version__}") + p.add_argument('infile', help='Neuroimaging volume to conform.') + p.add_argument('outfile', help='Name of output file.') + p.add_argument( + '--out-shape', + nargs=3, + default=(256, 256, 256), + type=int, + help='Shape of the conformed output.', + ) + p.add_argument( + '--voxel-size', + nargs=3, + default=(1, 1, 1), + type=int, + help='Voxel size in millimeters of the conformed output.', + ) + p.add_argument('--orientation', default='RAS', help='Orientation of the conformed output.') + p.add_argument('-f', '--force', action='store_true', help='Overwrite existing output files.') + p.add_argument('-V', '--version', action='version', version=f'{p.prog} {__version__}') return p @@ -46,7 +52,7 @@ def main(args=None): from_img = load(opts.infile) if not opts.force and Path(opts.outfile).exists(): - raise FileExistsError(f"Output file exists: {opts.outfile}") + raise FileExistsError(f'Output file exists: {opts.outfile}') out_img = conform( from_img=from_img, @@ -54,6 +60,7 @@ def main(args=None): voxel_size=opts.voxel_size, order=3, cval=0.0, - orientation=opts.orientation) + orientation=opts.orientation, + ) save(out_img, opts.outfile) diff --git a/nibabel/cmdline/convert.py b/nibabel/cmdline/convert.py index 8f1042c71d..ce80d8c709 100644 --- a/nibabel/cmdline/convert.py +++ b/nibabel/cmdline/convert.py @@ -21,20 +21,26 @@ def _get_parser(): """Return command-line argument parser.""" p = argparse.ArgumentParser(description=__doc__) - p.add_argument("infile", - help="Neuroimaging volume to convert") - p.add_argument("outfile", - help="Name of output file") - p.add_argument("--out-dtype", action="store", - help="On-disk data type; valid argument to numpy.dtype()") - p.add_argument("--image-type", action="store", - help="Name of NiBabel image class to create, e.g. Nifti1Image. " - "If specified, will be used prior to setting dtype. If unspecified, " - "a new image like `infile` will be created and converted to a type " - "matching the extension of `outfile`.") - p.add_argument("-f", "--force", action="store_true", - help="Overwrite output file if it exists, and ignore warnings if possible") - p.add_argument("-V", "--version", action="version", version=f"{p.prog} {nib.__version__}") + p.add_argument('infile', help='Neuroimaging volume to convert') + p.add_argument('outfile', help='Name of output file') + p.add_argument( + '--out-dtype', action='store', help='On-disk data type; valid argument to numpy.dtype()' + ) + p.add_argument( + '--image-type', + action='store', + help='Name of NiBabel image class to create, e.g. Nifti1Image. ' + 'If specified, will be used prior to setting dtype. 
If unspecified, ' + 'a new image like `infile` will be created and converted to a type ' + 'matching the extension of `outfile`.', + ) + p.add_argument( + '-f', + '--force', + action='store_true', + help='Overwrite output file if it exists, and ignore warnings if possible', + ) + p.add_argument('-V', '--version', action='version', version=f'{p.prog} {nib.__version__}') return p @@ -46,7 +52,7 @@ def main(args=None): orig = nib.load(opts.infile) if not opts.force and Path(opts.outfile).exists(): - raise FileExistsError(f"Output file exists: {opts.outfile}") + raise FileExistsError(f'Output file exists: {opts.outfile}') if opts.image_type: klass = getattr(nib, opts.image_type) @@ -59,7 +65,7 @@ def main(args=None): out_img.set_data_dtype(opts.out_dtype) except Exception as e: if opts.force: - warnings.warn(f"Ignoring error: {e!r}") + warnings.warn(f'Ignoring error: {e!r}') else: raise diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index 33532cf8e7..efba4809c7 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -20,11 +20,13 @@ class dummy_fuse: """Dummy fuse "module" so that nose does not blow during doctests""" + Fuse = object try: import fuse + uid = os.getuid() gid = os.getgid() except ImportError: @@ -43,7 +45,6 @@ class dummy_fuse: class FileHandle: - def __init__(self, fno): self.fno = fno self.keep_cache = False @@ -54,11 +55,9 @@ def __str__(self): class DICOMFS(fuse.Fuse): - def __init__(self, *args, **kwargs): if fuse is dummy_fuse: - raise RuntimeError( - "fuse module is not available, install it to use DICOMFS") + raise RuntimeError('fuse module is not available, install it to use DICOMFS') self.followlinks = kwargs.pop('followlinks', False) self.dicom_path = kwargs.pop('dicom_path', None) fuse.Fuse.__init__(self, *args, **kwargs) @@ -91,9 +90,11 @@ def get_paths(self): series_info += 'bits allocated: %d\n' % series.bits_allocated series_info += 'bits stored: %d\n' % series.bits_stored series_info += 'storage instances: %d\n' % len(series.storage_instances) - d[series.number] = {'INFO': series_info.encode('ascii', 'replace'), - f'{series.number}.nii': (series.nifti_size, series.as_nifti), - f'{series.number}.png': (series.png_size, series.as_png)} + d[series.number] = { + 'INFO': series_info.encode('ascii', 'replace'), + f'{series.number}.nii': (series.nifti_size, series.as_nifti), + f'{series.number}.png': (series.png_size, series.as_png), + } pd[study_datetime] = d return paths @@ -103,7 +104,7 @@ def match_path(self, path): logger.debug('return root') return wd for part in path.lstrip('/').split('/'): - logger.debug(f"path:{path} part:{part}") + logger.debug(f'path:{path} part:{part}') if part not in wd: return None wd = wd[part] @@ -180,7 +181,7 @@ def read(self, path, size, offset, fh): logger.debug(size) logger.debug(offset) logger.debug(fh) - return self.fhs[fh.fno][offset:offset + size] + return self.fhs[fh.fno][offset : offset + size] def release(self, path, flags, fh): logger.debug('release') @@ -192,21 +193,37 @@ def release(self, path, flags, fh): def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage="{} [OPTIONS] ".format( - os.path.basename(sys.argv[0])), - version="%prog " + nib.__version__) - - p.add_options([ - Option("-v", "--verbose", action="count", - dest="verbose", default=0, - help="make noise. 
Could be specified multiple times"), - ]) - - p.add_options([ - Option("-L", "--follow-links", action="store_true", - dest="followlinks", default=False, - help="Follow symbolic links in DICOM directory"), - ]) + usage='{} [OPTIONS] '.format( + os.path.basename(sys.argv[0]) + ), + version='%prog ' + nib.__version__, + ) + + p.add_options( + [ + Option( + '-v', + '--verbose', + action='count', + dest='verbose', + default=0, + help='make noise. Could be specified multiple times', + ), + ] + ) + + p.add_options( + [ + Option( + '-L', + '--follow-links', + action='store_true', + dest='followlinks', + default=False, + help='Follow symbolic links in DICOM directory', + ), + ] + ) return p @@ -219,13 +236,11 @@ def main(args=None): logger.setLevel(opts.verbose > 1 and logging.DEBUG or logging.INFO) if len(files) != 2: - sys.stderr.write(f"Please provide two arguments:\n{parser.usage}\n") + sys.stderr.write(f'Please provide two arguments:\n{parser.usage}\n') sys.exit(1) fs = DICOMFS( - dash_s_do='setsingle', - followlinks=opts.followlinks, - dicom_path=files[0].decode(encoding) + dash_s_do='setsingle', followlinks=opts.followlinks, dicom_path=files[0].decode(encoding) ) fs.parse(['-f', '-s', files[1]]) try: diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index b48033eb45..5ec5f425ee 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -32,40 +32,56 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage=f"{sys.argv[0]} [OPTIONS] [FILE ...]\n\n" + __doc__, - version="%prog " + nib.__version__) - - p.add_options([ - Option("-v", "--verbose", action="count", - dest="verbose", default=0, - help="Make more noise. Could be specified multiple times"), - - Option("-H", "--header-fields", - dest="header_fields", default='all', - help="Header fields (comma separated) to be printed as well" - " (if present)"), - - Option("--ma", "--data-max-abs-diff", - dest="data_max_abs_diff", - type=float, - default=0.0, - help="Maximal absolute difference in data between files" - " to tolerate."), - - Option("--mr", "--data-max-rel-diff", - dest="data_max_rel_diff", - type=float, - default=0.0, - help="Maximal relative difference in data between files to" - " tolerate. If --data-max-abs-diff is also specified," - " only the data points with absolute difference greater" - " than that value would be considered for relative" - " difference check."), - Option("--dt", "--datatype", - dest="dtype", - default=np.float64, - help="Enter a numpy datatype such as 'float32'.") - ]) + usage=f'{sys.argv[0]} [OPTIONS] [FILE ...]\n\n' + __doc__, + version='%prog ' + nib.__version__, + ) + + p.add_options( + [ + Option( + '-v', + '--verbose', + action='count', + dest='verbose', + default=0, + help='Make more noise. Could be specified multiple times', + ), + Option( + '-H', + '--header-fields', + dest='header_fields', + default='all', + help='Header fields (comma separated) to be printed as well' ' (if present)', + ), + Option( + '--ma', + '--data-max-abs-diff', + dest='data_max_abs_diff', + type=float, + default=0.0, + help='Maximal absolute difference in data between files' ' to tolerate.', + ), + Option( + '--mr', + '--data-max-rel-diff', + dest='data_max_rel_diff', + type=float, + default=0.0, + help='Maximal relative difference in data between files to' + ' tolerate. 
If --data-max-abs-diff is also specified,' + ' only the data points with absolute difference greater' + ' than that value would be considered for relative' + ' difference check.', + ), + Option( + '--dt', + '--datatype', + dest='dtype', + default=np.float64, + help="Enter a numpy datatype such as 'float32'.", + ), + ] + ) return p @@ -94,7 +110,7 @@ def are_values_different(*values): except TypeError as exc: str_exc = str(exc) # Not implemented in numpy 1.7.1 - if "not supported" in str_exc or "not implemented" in str_exc: + if 'not supported' in str_exc or 'not implemented' in str_exc: value0_nans = None else: raise @@ -104,8 +120,7 @@ def are_values_different(*values): return True elif isinstance(value0, np.ndarray): # use .dtype.type to provide endianness agnostic comparison - if value0.dtype.type != value.dtype.type or \ - value0.shape != value.shape: + if value0.dtype.type != value.dtype.type or value0.shape != value.shape: return True # there might be nans and they need special treatment if value0_nans is not None: @@ -159,15 +174,15 @@ def get_headers_diff(file_headers, names=None): def get_data_hash_diff(files, dtype=np.float64): """Get difference between md5 values of data - Parameters - ---------- - files: list of actual files + Parameters + ---------- + files: list of actual files - Returns - ------- - list - np.array: md5 values of respective files - """ + Returns + ------- + list + np.array: md5 values of respective files + """ md5sums = [ hashlib.md5(np.ascontiguousarray(nib.load(f).get_fdata(dtype=dtype))).hexdigest() @@ -209,14 +224,13 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): """ # we are doomed to keep them in RAM now - data = [f if isinstance(f, np.ndarray) else nib.load(f).get_fdata(dtype=dtype) - for f in files] + data = [f if isinstance(f, np.ndarray) else nib.load(f).get_fdata(dtype=dtype) for f in files] diffs = OrderedDict() for i, d1 in enumerate(data[:-1]): # populate empty entries for non-compared diffs1 = [None] * (i + 1) - for j, d2 in enumerate(data[i + 1:], i + 1): + for j, d2 in enumerate(data[i + 1 :], i + 1): if d1.shape == d2.shape: abs_diff = np.abs(d1 - d2) @@ -251,7 +265,7 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): diffs1.append(None) else: - diffs1.append({'CMP': "incompat"}) + diffs1.append({'CMP': 'incompat'}) if any(diffs1): @@ -263,28 +277,28 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): def display_diff(files, diff): """Format header differences into a nice string - Parameters - ---------- - files: list of files that were compared so we can print their names - diff: dict of different valued header fields + Parameters + ---------- + files: list of files that were compared so we can print their names + diff: dict of different valued header fields - Returns - ------- - str - string-formatted table of differences + Returns + ------- + str + string-formatted table of differences """ - output = "" - field_width = "{:<15}" - filename_width = "{:<53}" - value_width = "{:<55}" + output = '' + field_width = '{:<15}' + filename_width = '{:<53}' + value_width = '{:<55}' - output += "These files are different.\n" + output += 'These files are different.\n' output += field_width.format('Field/File') for i, f in enumerate(files, 1): - output += "%d:%s" % (i, filename_width.format(os.path.basename(f))) + output += '%d:%s' % (i, filename_width.format(os.path.basename(f))) - output += "\n" + output += '\n' for key, value in diff.items(): output += field_width.format(key) @@ -305,14 +319,15 
@@ def display_diff(files, diff): item_str = re.sub('[\x00]', '?', item_str) output += value_width.format(item_str) - output += "\n" + output += '\n' return output -def diff(files, header_fields='all', data_max_abs_diff=None, - data_max_rel_diff=None, dtype=np.float64): - assert len(files) >= 2, "Please enter at least two files" +def diff( + files, header_fields='all', data_max_abs_diff=None, data_max_rel_diff=None, dtype=np.float64 +): + assert len(files) >= 2, 'Please enter at least two files' file_headers = [nib.load(f).header for f in files] @@ -330,10 +345,9 @@ def diff(files, header_fields='all', data_max_abs_diff=None, if data_md5_diffs: # provide details, possibly triggering the ignore of the difference # in data - data_diffs = get_data_diff(files, - max_abs=data_max_abs_diff, - max_rel=data_max_rel_diff, - dtype=dtype) + data_diffs = get_data_diff( + files, max_abs=data_max_abs_diff, max_rel=data_max_rel_diff, dtype=dtype + ) if data_diffs: diff['DATA(md5)'] = data_md5_diffs diff.update(data_diffs) @@ -359,12 +373,12 @@ def main(args=None, out=None): header_fields=opts.header_fields, data_max_abs_diff=opts.data_max_abs_diff, data_max_rel_diff=opts.data_max_rel_diff, - dtype=opts.dtype + dtype=opts.dtype, ) if files_diff: out.write(display_diff(files, files_diff)) raise SystemExit(1) else: - out.write("These files are identical.\n") + out.write('These files are identical.\n') raise SystemExit(0) diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 2995ff58c5..1bb9396bb3 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -20,8 +20,7 @@ import nibabel.cmdline.utils from nibabel.cmdline.utils import _err, verbose, table2string, ap, safe_get -__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' \ - 'and NiBabel contributors' +__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' 'and NiBabel contributors' __license__ = 'MIT' @@ -31,58 +30,88 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage=f"{sys.argv[0]} [OPTIONS] [FILE ...]\n\n" + __doc__, - version="%prog " + nib.__version__) - - p.add_options([ - Option("-v", "--verbose", action="count", - dest="verbose", default=0, - help="Make more noise. Could be specified multiple times"), - - Option("-H", "--header-fields", - dest="header_fields", default='', - help="Header fields (comma separated) to be printed as well (if present)"), - - Option("-s", "--stats", - action="store_true", dest='stats', default=False, - help="Output basic data statistics"), - - Option("-c", "--counts", - action="store_true", dest='counts', default=False, - help="Output counts - number of entries for each numeric value " - "(useful for int ROI maps)"), - - Option("--all-counts", - action="store_true", dest='all_counts', default=False, - help="Output all counts, even if number of unique values > %d" % MAX_UNIQUE), - - Option("-z", "--zeros", - action="store_true", dest='stats_zeros', default=False, - help="Include zeros into output basic data statistics (--stats, --counts)"), - ]) + usage=f'{sys.argv[0]} [OPTIONS] [FILE ...]\n\n' + __doc__, + version='%prog ' + nib.__version__, + ) + + p.add_options( + [ + Option( + '-v', + '--verbose', + action='count', + dest='verbose', + default=0, + help='Make more noise. 
Could be specified multiple times', + ), + Option( + '-H', + '--header-fields', + dest='header_fields', + default='', + help='Header fields (comma separated) to be printed as well (if present)', + ), + Option( + '-s', + '--stats', + action='store_true', + dest='stats', + default=False, + help='Output basic data statistics', + ), + Option( + '-c', + '--counts', + action='store_true', + dest='counts', + default=False, + help='Output counts - number of entries for each numeric value ' + '(useful for int ROI maps)', + ), + Option( + '--all-counts', + action='store_true', + dest='all_counts', + default=False, + help='Output all counts, even if number of unique values > %d' % MAX_UNIQUE, + ), + Option( + '-z', + '--zeros', + action='store_true', + dest='stats_zeros', + default=False, + help='Include zeros into output basic data statistics (--stats, --counts)', + ), + ] + ) return p def proc_file(f, opts): - verbose(1, f"Loading {f}") + verbose(1, f'Loading {f}') - row = [f"@l{f}"] + row = [f'@l{f}'] try: vol = nib.load(f) h = vol.header except Exception as e: row += ['failed'] - verbose(2, f"Failed to gather information -- {e}") + verbose(2, f'Failed to gather information -- {e}') return row - row += [str(safe_get(h, 'data_dtype')), - f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]", - f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"] + row += [ + str(safe_get(h, 'data_dtype')), + f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]", + f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}", + ] # Slope - if hasattr(h, 'has_data_slope') and \ - (h.has_data_slope or h.has_data_intercept) and \ - not h.get_slope_inter() in [(1.0, 0.0), (None, None)]: + if ( + hasattr(h, 'has_data_slope') + and (h.has_data_slope or h.has_data_intercept) + and not h.get_slope_inter() in [(1.0, 0.0), (None, None)] + ): row += ['@l*%.3g+%.3g' % h.get_slope_inter()] else: row += [''] @@ -110,13 +139,16 @@ def proc_file(f, opts): row += [_err()] try: - if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and - (h.get_qform() != h.get_sform()).any()): + if ( + hasattr(h, 'get_qform') + and hasattr(h, 'get_sform') + and (h.get_qform() != h.get_sform()).any() + ): row += ['sform'] else: row += [''] except Exception as e: - verbose(2, f"Failed to obtain qform or sform -- {e}") + verbose(2, f'Failed to obtain qform or sform -- {e}') if isinstance(h, nib.AnalyzeHeader): row += [''] else: @@ -134,19 +166,19 @@ def proc_file(f, opts): d = d.reshape(-1) if opts.stats: # just # of elements - row += ["@l[%d]" % np.prod(d.shape)] + row += ['@l[%d]' % np.prod(d.shape)] # stats row += [f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' if len(d) else '-'] if opts.counts: items, inv = np.unique(d, return_inverse=True) if len(items) > 1000 and not opts.all_counts: - counts = _err("%d uniques. Use --all-counts" % len(items)) + counts = _err('%d uniques. Use --all-counts' % len(items)) else: freq = np.bincount(inv) - counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq)) - row += ["@l" + counts] + counts = ' '.join('%g:%d' % (i, f) for i, f in zip(items, freq)) + row += ['@l' + counts] except OSError as e: - verbose(2, f"Failed to obtain stats/counts -- {e}") + verbose(2, f'Failed to obtain stats/counts -- {e}') row += [_err()] return row diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py index 51867da065..64f02694ee 100644 --- a/nibabel/cmdline/nifti_dx.py +++ b/nibabel/cmdline/nifti_dx.py @@ -7,7 +7,7 @@ # copyright and license terms. 
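
For reference, the `--counts` branch of `nib-ls`'s `proc_file` (reflowed above) can be reproduced in isolation. A minimal sketch on a made-up array: `np.unique(..., return_inverse=True)` returns the distinct values together with an index array mapping each element back to its distinct value, and `np.bincount` over that index array gives the per-value frequencies that are joined into the `value:count` summary string.

    import numpy as np

    d = np.array([0, 1, 1, 2, 2, 2])           # toy ROI-map data
    items, inv = np.unique(d, return_inverse=True)
    freq = np.bincount(inv)                     # freq[i] counts occurrences of items[i]
    print(' '.join('%g:%d' % (i, f) for i, f in zip(items, freq)))
    # -> 0:1 1:2 2:3
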
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Print nifti diagnostics for header files """ +"""Print nifti diagnostics for header files""" import sys from optparse import OptionParser @@ -15,16 +15,15 @@ import nibabel as nib __author__ = 'Matthew Brett' -__copyright__ = 'Copyright (c) 2011-18 Matthew Brett ' \ - 'and NiBabel contributors' +__copyright__ = 'Copyright (c) 2011-18 Matthew Brett ' 'and NiBabel contributors' __license__ = 'MIT' def main(args=None): - """ Go go team """ + """Go go team""" parser = OptionParser( - usage=f"{sys.argv[0]} [FILE ...]\n\n" + __doc__, - version="%prog " + nib.__version__) + usage=f'{sys.argv[0]} [FILE ...]\n\n' + __doc__, version='%prog ' + nib.__version__ + ) (opts, files) = parser.parse_args(args=args) for fname in files: @@ -32,7 +31,7 @@ def main(args=None): hdr = fobj.read(nib.nifti1.header_dtype.itemsize) result = nib.Nifti1Header.diagnose_binaryblock(hdr) if len(result): - print(f'Picky header check output for "{fname}\"\n') + print(f'Picky header check output for "{fname}"\n') print(result + '\n') else: print(f'Header for "{fname}" is clean') diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index 0f868bd06b..f0d5b207f7 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -14,123 +14,227 @@ import nibabel.nifti1 as nifti1 from nibabel.filename_parser import splitext_addext from nibabel.volumeutils import fname_ext_ul_case -from nibabel.orientations import (io_orientation, inv_ornt_aff, - apply_orientation) +from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation from nibabel.affines import apply_affine, from_matvec, to_matvec def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage=f"{sys.argv[0]} [OPTIONS] \n\n" + __doc__, - version="%prog " + nibabel.__version__) + usage=f'{sys.argv[0]} [OPTIONS] \n\n' + __doc__, + version='%prog ' + nibabel.__version__, + ) p.add_option( - Option("-v", "--verbose", action="store_true", dest="verbose", - default=False, - help="""Make some noise.""")) + Option( + '-v', + '--verbose', + action='store_true', + dest='verbose', + default=False, + help="""Make some noise.""", + ) + ) p.add_option( - Option("-o", "--output-dir", action="store", type="string", - dest="outdir", default=None, - help=one_line("""Destination directory for NIfTI files. - Default: current directory."""))) + Option( + '-o', + '--output-dir', + action='store', + type='string', + dest='outdir', + default=None, + help=one_line( + """Destination directory for NIfTI files. + Default: current directory.""" + ), + ) + ) p.add_option( - Option("-c", "--compressed", action="store_true", - dest="compressed", default=False, - help="Whether to write compressed NIfTI files or not.")) + Option( + '-c', + '--compressed', + action='store_true', + dest='compressed', + default=False, + help='Whether to write compressed NIfTI files or not.', + ) + ) p.add_option( - Option("-p", "--permit-truncated", action="store_true", - dest="permit_truncated", default=False, - help=one_line( - """Permit conversion of truncated recordings. Support for + Option( + '-p', + '--permit-truncated', + action='store_true', + dest='permit_truncated', + default=False, + help=one_line( + """Permit conversion of truncated recordings. 
Support for this is experimental, and results *must* be checked - afterward for validity."""))) + afterward for validity.""" + ), + ) + ) p.add_option( - Option("-b", "--bvs", action="store_true", dest="bvs", default=False, - help=one_line( - """Output bvals/bvecs files in addition to NIFTI - image."""))) + Option( + '-b', + '--bvs', + action='store_true', + dest='bvs', + default=False, + help=one_line( + """Output bvals/bvecs files in addition to NIFTI + image.""" + ), + ) + ) p.add_option( - Option("-d", "--dwell-time", action="store_true", default=False, - dest="dwell_time", - help=one_line( - """Calculate the scan dwell time. If supplied, the magnetic + Option( + '-d', + '--dwell-time', + action='store_true', + default=False, + dest='dwell_time', + help=one_line( + """Calculate the scan dwell time. If supplied, the magnetic field strength should also be supplied using --field-strength (default 3). The field strength must be supplied because it is not encoded in the PAR/REC - format."""))) + format.""" + ), + ) + ) p.add_option( - Option("--field-strength", action="store", type="float", - dest="field_strength", - help=one_line( - """The magnetic field strength of the recording, only needed + Option( + '--field-strength', + action='store', + type='float', + dest='field_strength', + help=one_line( + """The magnetic field strength of the recording, only needed for --dwell-time. The field strength must be supplied - because it is not encoded in the PAR/REC format."""))) + because it is not encoded in the PAR/REC format.""" + ), + ) + ) p.add_option( - Option("-i", "--volume-info", action="store_true", dest="vol_info", - default=False, - help=one_line( - """Export .PAR volume labels corresponding to the fourth + Option( + '-i', + '--volume-info', + action='store_true', + dest='vol_info', + default=False, + help=one_line( + """Export .PAR volume labels corresponding to the fourth dimension of the data. The dimension info will be stored in CSV format with the first row containing dimension labels and the subsequent rows (one per volume), the corresponding indices. Only labels that vary along the 4th dimension are exported (e.g. for a single volume structural scan there are no dynamic labels and no output file will be created). - """))) + """ + ), + ) + ) p.add_option( - Option("--origin", action="store", dest="origin", default="scanner", - help=one_line( - """Reference point of the q-form transformation of the NIfTI + Option( + '--origin', + action='store', + dest='origin', + default='scanner', + help=one_line( + """Reference point of the q-form transformation of the NIfTI image. If 'scanner' the (0,0,0) coordinates will refer to the scanner's iso center. If 'fov', this coordinate will be the center of the recorded volume (field of view). Default: - 'scanner'."""))) + 'scanner'.""" + ), + ) + ) p.add_option( - Option("--minmax", action="store", nargs=2, dest="minmax", - help=one_line( - """Minimum and maximum settings to be stored in the NIfTI + Option( + '--minmax', + action='store', + nargs=2, + dest='minmax', + help=one_line( + """Minimum and maximum settings to be stored in the NIfTI header. If any of them is set to 'parse', the scaled data is scanned for the actual minimum and maximum. To bypass this potentially slow and memory intensive step (the data has to be scaled and fully loaded into memory), fixed values can be provided as space-separated pair, e.g. '5.4 120.4'. It is possible to set a fixed minimum as scan for the actual - maximum (and vice versa). 
Default: 'parse parse'."""))) + maximum (and vice versa). Default: 'parse parse'.""" + ), + ) + ) p.set_defaults(minmax=('parse', 'parse')) p.add_option( - Option("--store-header", action="store_true", dest="store_header", - default=False, - help=one_line( - """If set, all information from the PAR header is stored in - an extension of the NIfTI file header. Default: off"""))) + Option( + '--store-header', + action='store_true', + dest='store_header', + default=False, + help=one_line( + """If set, all information from the PAR header is stored in + an extension of the NIfTI file header. Default: off""" + ), + ) + ) p.add_option( - Option("--scaling", action="store", dest="scaling", default='dv', - help=one_line( - """Choose data scaling setting. The PAR header defines two + Option( + '--scaling', + action='store', + dest='scaling', + default='dv', + help=one_line( + """Choose data scaling setting. The PAR header defines two different data scaling settings: 'dv' (values displayed on console) and 'fp' (floating point values). Either one can be chosen, or scaling can be disabled completely ('off'). Note that neither method will actually scale the data, but just store the corresponding settings in the NIfTI header, unless non-uniform scaling is used, in which case the data is - stored in the file in scaled form. Default: 'dv'"""))) + stored in the file in scaled form. Default: 'dv'""" + ), + ) + ) p.add_option( - Option('--keep-trace', action="store_true", dest='keep_trace', - default=False, - help=one_line("""Do not discard the diagnostic Philips DTI - trace volume, if it exists in the data."""))) + Option( + '--keep-trace', + action='store_true', + dest='keep_trace', + default=False, + help=one_line( + """Do not discard the diagnostic Philips DTI + trace volume, if it exists in the data.""" + ), + ) + ) p.add_option( - Option("--overwrite", action="store_true", dest="overwrite", - default=False, - help=one_line("""Overwrite file if it exists. Default: - False"""))) + Option( + '--overwrite', + action='store_true', + dest='overwrite', + default=False, + help=one_line( + """Overwrite file if it exists. Default: + False""" + ), + ) + ) p.add_option( - Option("--strict-sort", action="store_true", dest="strict_sort", - default=False, - help=one_line("""Use additional keys in determining the order + Option( + '--strict-sort', + action='store_true', + dest='strict_sort', + default=False, + help=one_line( + """Use additional keys in determining the order to sort the slices within the .REC file. 
This may be necessary for more complicated scans with multiple echos, - cardiac phases, ASL label states, etc."""))) + cardiac phases, ASL label states, etc.""" + ), + ) + ) return p @@ -163,10 +267,12 @@ def proc_file(infile, opts): # load the PAR header and data scaling = 'dv' if opts.scaling == 'off' else opts.scaling infile = fname_ext_ul_case(infile) - pr_img = pr.load(infile, - permit_truncated=opts.permit_truncated, - scaling=scaling, - strict_sort=opts.strict_sort) + pr_img = pr.load( + infile, + permit_truncated=opts.permit_truncated, + scaling=scaling, + strict_sort=opts.strict_sort, + ) pr_hdr = pr_img.header affine = pr_hdr.get_affine(origin=opts.origin) slope, intercept = pr_hdr.get_data_scaling(scaling) @@ -174,8 +280,8 @@ def proc_file(infile, opts): verbose(f'Using data scaling "{opts.scaling}"') # get original scaling, and decide if we scale in-place or not if opts.scaling == 'off': - slope = np.array([1.]) - intercept = np.array([0.]) + slope = np.array([1.0]) + intercept = np.array([0.0]) in_data = pr_img.dataobj.get_unscaled() out_dtype = pr_hdr.get_data_dtype() elif not np.any(np.diff(slope)) and not np.any(np.diff(intercept)): @@ -186,15 +292,13 @@ def proc_file(infile, opts): out_dtype = pr_hdr.get_data_dtype() else: # Multi scalefactor case - slope = np.array([1.]) - intercept = np.array([0.]) + slope = np.array([1.0]) + intercept = np.array([0.0]) in_data = np.array(pr_img.dataobj) out_dtype = np.float64 # Reorient data block to LAS+ if necessary ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine)) - if np.all(ornt == [[0, 1], - [1, 1], - [2, 1]]): # already in LAS+ + if np.all(ornt == [[0, 1], [1, 1], [2, 1]]): # already in LAS+ t_aff = np.eye(4) else: # Not in LAS+ t_aff = inv_ornt_aff(ornt, pr_img.shape) @@ -249,8 +353,10 @@ def proc_file(infile, opts): if bvals is None and bvecs is None: verbose('No DTI volumes detected, bvals and bvecs not written') elif bvecs is None: - verbose('DTI volumes detected, but no diffusion direction info was' - 'found. Writing .bvals file only.') + verbose( + 'DTI volumes detected, but no diffusion direction info was' + 'found. Writing .bvals file only.' + ) with open(basefilename + '.bvals', 'w') as fid: # np.savetxt could do this, but it's just a loop anyway for val in bvals: @@ -288,14 +394,15 @@ def proc_file(infile, opts): if opts.dwell_time: try: dwell_time = calculate_dwell_time( - pr_hdr.get_water_fat_shift(), - pr_hdr.get_echo_train_length(), - opts.field_strength) + pr_hdr.get_water_fat_shift(), pr_hdr.get_echo_train_length(), opts.field_strength + ) except MRIError: verbose('No EPI factors, dwell time not written') else: - verbose(f'Writing dwell time ({dwell_time!r} sec) ' - f'calculated assuming {opts.field_strength}T magnet') + verbose( + f'Writing dwell time ({dwell_time!r} sec) ' + f'calculated assuming {opts.field_strength}T magnet' + ) with open(basefilename + '.dwell_time', 'w') as fid: fid.write(f'{dwell_time!r}\n') # done @@ -322,7 +429,6 @@ def main(): errs.append(f'{infile}: {e}') if len(errs): - error('Caught %i exceptions. Dump follows:\n\n %s' - % (len(errs), '\n'.join(errs)), 1) + error('Caught %i exceptions. 
Dump follows:\n\n %s' % (len(errs), '\n'.join(errs)), 1) else: verbose('Done') diff --git a/nibabel/cmdline/roi.py b/nibabel/cmdline/roi.py index 0631ecc0d1..690bb0b646 100644 --- a/nibabel/cmdline/roi.py +++ b/nibabel/cmdline/roi.py @@ -6,7 +6,7 @@ def lossless_slice(img, slicers): if not nb.imageclasses.spatial_axes_first(img): - raise ValueError("Cannot slice an image that is not known to have spatial axes first") + raise ValueError('Cannot slice an image that is not known to have spatial axes first') scaling = hasattr(img.header, 'set_slope_inter') @@ -21,41 +21,44 @@ def lossless_slice(img, slicers): def parse_slice(crop, allow_step=True): if crop is None: return slice(None) - start, stop, *extra = [int(val) if val else None for val in crop.split(":")] + start, stop, *extra = [int(val) if val else None for val in crop.split(':')] if len(extra) > 1: - raise ValueError(f"Cannot parse specification: {crop}") + raise ValueError(f'Cannot parse specification: {crop}') if not allow_step and extra and extra[0] not in (1, None): - raise ValueError(f"Step entry not permitted: {crop}") + raise ValueError(f'Step entry not permitted: {crop}') step = extra[0] if extra else None if step not in (1, -1, None): - raise ValueError(f"Downsampling is not supported: {crop}") + raise ValueError(f'Downsampling is not supported: {crop}') return slice(start, stop, step) def sanitize(args): # Argparse likes to treat "-1:..." as a flag - return [f' {arg}' if arg[0] == '-' and ":" in arg else arg - for arg in args] + return [f' {arg}' if arg[0] == '-' and ':' in arg else arg for arg in args] def main(args=None): if args is None: args = sys.argv[1:] parser = argparse.ArgumentParser( - description="Crop images to a region of interest", - epilog="If a start or stop value is omitted, the start or end of the axis is assumed.") + description='Crop images to a region of interest', + epilog='If a start or stop value is omitted, the start or end of the axis is assumed.', + ) parser.add_argument('--version', action='version', version=nb.__version__) - parser.add_argument("-i", metavar="I1:I2[:-1]", - help="Start/stop [flip] along first axis (0-indexed)") - parser.add_argument("-j", metavar="J1:J2[:-1]", - help="Start/stop [flip] along second axis (0-indexed)") - parser.add_argument("-k", metavar="K1:K2[:-1]", - help="Start/stop [flip] along third axis (0-indexed)") - parser.add_argument("-t", metavar="T1:T2", help="Start/stop along fourth axis (0-indexed)") - parser.add_argument("in_file", help="Image file to crop") - parser.add_argument("out_file", help="Output file name") + parser.add_argument( + '-i', metavar='I1:I2[:-1]', help='Start/stop [flip] along first axis (0-indexed)' + ) + parser.add_argument( + '-j', metavar='J1:J2[:-1]', help='Start/stop [flip] along second axis (0-indexed)' + ) + parser.add_argument( + '-k', metavar='K1:K2[:-1]', help='Start/stop [flip] along third axis (0-indexed)' + ) + parser.add_argument('-t', metavar='T1:T2', help='Start/stop along fourth axis (0-indexed)') + parser.add_argument('in_file', help='Image file to crop') + parser.add_argument('out_file', help='Output file name') opts = parser.parse_args(args=sanitize(args)) @@ -65,7 +68,7 @@ def main(args=None): kslice = parse_slice(opts.k) tslice = parse_slice(opts.t, allow_step=False) except ValueError as err: - print(f"Could not parse input arguments. Reason follows.\n{err}") + print(f'Could not parse input arguments. 
Reason follows.\n{err}') return 1 kwargs = {} @@ -73,16 +76,16 @@ def main(args=None): kwargs['mmap'] = False img = nb.load(opts.in_file, **kwargs) - slicers = (islice, jslice, kslice, tslice)[:img.ndim] + slicers = (islice, jslice, kslice, tslice)[: img.ndim] expected_shape = nb.fileslice.predict_shape(slicers, img.shape) if any(dim == 0 for dim in expected_shape): - print(f"Cannot take zero-length slices. Predicted shape {expected_shape}.") + print(f'Cannot take zero-length slices. Predicted shape {expected_shape}.') return 1 try: sliced_img = lossless_slice(img, slicers) except Exception: - print("Could not slice image. Full traceback follows.") + print('Could not slice image. Full traceback follows.') raise nb.save(sliced_img, opts.out_file) return 0 diff --git a/nibabel/cmdline/stats.py b/nibabel/cmdline/stats.py index 91b9f7c104..5c5d58f93c 100644 --- a/nibabel/cmdline/stats.py +++ b/nibabel/cmdline/stats.py @@ -19,12 +19,21 @@ def _get_parser(): """Return command-line argument parser.""" p = argparse.ArgumentParser(description=__doc__) - p.add_argument("infile", - help="Neuroimaging volume to compute statistics on.") - p.add_argument("-V", "--Volume", action="store_true", required=False, - help="Compute mask volume of a given mask image.") - p.add_argument("--units", default="mm3", required=False, - choices=("mm3", "vox"), help="Preferred output units") + p.add_argument('infile', help='Neuroimaging volume to compute statistics on.') + p.add_argument( + '-V', + '--Volume', + action='store_true', + required=False, + help='Compute mask volume of a given mask image.', + ) + p.add_argument( + '--units', + default='mm3', + required=False, + choices=('mm3', 'vox'), + help='Preferred output units', + ) return p diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py index 3c25ea3266..f50801c714 100644 --- a/nibabel/cmdline/tck2trk.py +++ b/nibabel/cmdline/tck2trk.py @@ -11,14 +11,15 @@ def parse_args(): - DESCRIPTION = "Convert tractograms (TCK -> TRK)." + DESCRIPTION = 'Convert tractograms (TCK -> TRK).' parser = argparse.ArgumentParser(description=DESCRIPTION) - parser.add_argument("anatomy", - help="reference anatomical image (.nii|.nii.gz.") - parser.add_argument("tractograms", metavar="tractogram", nargs="+", - help="list of tractograms (.tck).") - parser.add_argument("-f", "--force", action="store_true", - help="overwrite existing output files.") + parser.add_argument('anatomy', help='reference anatomical image (.nii|.nii.gz.') + parser.add_argument( + 'tractograms', metavar='tractogram', nargs='+', help='list of tractograms (.tck).' + ) + parser.add_argument( + '-f', '--force', action='store_true', help='overwrite existing output files.' 
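
The conversion that `tck2trk`'s `main()` performs below amounts to copying the reference anatomy's geometry into a TRK header and re-saving the tractogram. A self-contained sketch of the same steps, assuming two hypothetical input files, `anat.nii` and `bundle.tck`:

    import nibabel as nib
    from nibabel.streamlines import Field
    from nibabel.orientations import aff2axcodes

    nii = nib.load('anat.nii')                      # reference anatomical image
    header = {
        Field.VOXEL_TO_RASMM: nii.affine.copy(),    # voxel -> world mapping
        Field.VOXEL_SIZES: nii.header.get_zooms()[:3],
        Field.DIMENSIONS: nii.shape[:3],
        Field.VOXEL_ORDER: ''.join(aff2axcodes(nii.affine)),
    }
    tck = nib.streamlines.load('bundle.tck')
    nib.streamlines.save(tck.tractogram, 'bundle.trk', header=header)

TRK files carry a reference voxel grid in their header, which is why the script requires the anatomical volume as its first argument.
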
+ ) args = parser.parse_args() return args, parser @@ -30,7 +31,7 @@ def main(): try: nii = nib.load(args.anatomy) except Exception: - parser.error("Expecting anatomical image as first argument.") + parser.error('Expecting anatomical image as first argument.') for tractogram in args.tractograms: tractogram_format = nib.streamlines.detect_format(tractogram) @@ -49,7 +50,7 @@ def main(): header[Field.VOXEL_TO_RASMM] = nii.affine.copy() header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3] header[Field.DIMENSIONS] = nii.shape[:3] - header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine)) + header[Field.VOXEL_ORDER] = ''.join(aff2axcodes(nii.affine)) tck = nib.streamlines.load(tractogram) nib.streamlines.save(tck.tractogram, output_filename, header=header) diff --git a/nibabel/cmdline/tests/test_conform.py b/nibabel/cmdline/tests/test_conform.py index 0f64f5953b..8e203b68f9 100644 --- a/nibabel/cmdline/tests/test_conform.py +++ b/nibabel/cmdline/tests/test_conform.py @@ -23,8 +23,8 @@ @needs_scipy def test_default(tmpdir): - infile = test_data(fname="anatomical.nii") - outfile = tmpdir / "output.nii.gz" + infile = test_data(fname='anatomical.nii') + outfile = tmpdir / 'output.nii.gz' main([str(infile), str(outfile)]) assert outfile.isfile() c = nib.load(outfile) @@ -35,19 +35,21 @@ def test_default(tmpdir): with pytest.raises(FileExistsError): main([str(infile), str(outfile)]) - main([str(infile), str(outfile), "--force"]) + main([str(infile), str(outfile), '--force']) assert outfile.isfile() @needs_scipy def test_nondefault(tmpdir): - infile = test_data(fname="anatomical.nii") - outfile = tmpdir / "output.nii.gz" + infile = test_data(fname='anatomical.nii') + outfile = tmpdir / 'output.nii.gz' out_shape = (100, 100, 150) voxel_size = (1, 2, 4) - orientation = "LAS" - args = (f"{infile} {outfile} --out-shape {' '.join(map(str, out_shape))} " - f"--voxel-size {' '.join(map(str, voxel_size))} --orientation {orientation}") + orientation = 'LAS' + args = ( + f"{infile} {outfile} --out-shape {' '.join(map(str, out_shape))} " + f"--voxel-size {' '.join(map(str, voxel_size))} --orientation {orientation}" + ) main(args.split()) assert outfile.isfile() c = nib.load(outfile) diff --git a/nibabel/cmdline/tests/test_convert.py b/nibabel/cmdline/tests/test_convert.py index 487bfb7401..00f00602af 100644 --- a/nibabel/cmdline/tests/test_convert.py +++ b/nibabel/cmdline/tests/test_convert.py @@ -71,10 +71,13 @@ def test_convert_dtype(tmp_path, data_dtype): assert converted.get_data_dtype() == expected_dtype -@pytest.mark.parametrize('ext,img_class', [ - ('mgh', nib.MGHImage), - ('img', nib.Nifti1Pair), -]) +@pytest.mark.parametrize( + 'ext,img_class', + [ + ('mgh', nib.MGHImage), + ('img', nib.Nifti1Pair), + ], +) def test_convert_by_extension(tmp_path, ext, img_class): infile = test_data(fname='anatomical.nii') outfile = tmp_path / f'output.{ext}' @@ -91,11 +94,14 @@ def test_convert_by_extension(tmp_path, ext, img_class): assert converted.__class__ == img_class -@pytest.mark.parametrize('ext,img_class', [ - ('mgh', nib.MGHImage), - ('img', nib.Nifti1Pair), - ('nii', nib.Nifti2Image), -]) +@pytest.mark.parametrize( + 'ext,img_class', + [ + ('mgh', nib.MGHImage), + ('img', nib.Nifti1Pair), + ('nii', nib.Nifti2Image), + ], +) def test_convert_imgtype(tmp_path, ext, img_class): infile = test_data(fname='anatomical.nii') outfile = tmp_path / f'output.{ext}' @@ -122,7 +128,7 @@ def test_convert_nifti_int_fail(tmp_path): with pytest.raises(ValueError): convert.main([str(infile), str(outfile), '--out-dtype', 
'int']) assert not outfile.exists() - + with pytest.warns(UserWarning): convert.main([str(infile), str(outfile), '--out-dtype', 'int', '--force']) assert outfile.is_file() @@ -135,13 +141,16 @@ def test_convert_nifti_int_fail(tmp_path): assert converted.get_data_dtype() == orig.get_data_dtype() -@pytest.mark.parametrize('orig_dtype,alias,expected_dtype', [ - ('int64', 'mask', 'uint8'), - ('int64', 'compat', 'int32'), - ('int64', 'smallest', 'uint8'), - ('float64', 'mask', 'uint8'), - ('float64', 'compat', 'float32'), -]) +@pytest.mark.parametrize( + 'orig_dtype,alias,expected_dtype', + [ + ('int64', 'mask', 'uint8'), + ('int64', 'compat', 'int32'), + ('int64', 'smallest', 'uint8'), + ('float64', 'mask', 'uint8'), + ('float64', 'compat', 'float32'), + ], +) def test_convert_aliases(tmp_path, orig_dtype, alias, expected_dtype): orig_fname = tmp_path / 'orig.nii' out_fname = tmp_path / 'out.nii' diff --git a/nibabel/cmdline/tests/test_parrec2nii.py b/nibabel/cmdline/tests/test_parrec2nii.py index c41679d84d..2100f3f478 100644 --- a/nibabel/cmdline/tests/test_parrec2nii.py +++ b/nibabel/cmdline/tests/test_parrec2nii.py @@ -1,4 +1,4 @@ -""" Tests for the parrec2nii exe code +"""Tests for the parrec2nii exe code """ from os.path import join, isfile, basename @@ -9,23 +9,29 @@ from nibabel.cmdline import parrec2nii from unittest.mock import Mock, MagicMock, patch -from numpy.testing import (assert_almost_equal, assert_array_equal) +from numpy.testing import assert_almost_equal, assert_array_equal from nibabel.tests.test_parrec import EG_PAR, VARY_PAR from nibabel.tmpdirs import InTemporaryDirectory AN_OLD_AFFINE = numpy.array( - [[-3.64994708, 0., 1.83564171, 123.66276611], - [0., -3.75, 0., 115.617], - [0.86045705, 0., 7.78655376, -27.91161211], - [0., 0., 0., 1.]]) + [ + [-3.64994708, 0.0, 1.83564171, 123.66276611], + [0.0, -3.75, 0.0, 115.617], + [0.86045705, 0.0, 7.78655376, -27.91161211], + [0.0, 0.0, 0.0, 1.0], + ] +) PAR_AFFINE = numpy.array( -[[ -3.64994708, 0. , 1.83564171, 107.63076611], - [ 0. , 3.75, 0. , -118.125 ], - [ 0.86045705, 0. , 7.78655376, -58.25061211], - [ 0. , 0. , 0. , 1. ]]) + [ + [-3.64994708, 0.0, 1.83564171, 107.63076611], + [0.0, 3.75, 0.0, -118.125], + [0.86045705, 0.0, 7.78655376, -58.25061211], + [0.0, 0.0, 0.0, 1.0], + ] +) @patch('nibabel.cmdline.parrec2nii.verbose') @@ -36,7 +42,7 @@ def test_parrec2nii_sets_qform_sform_code1(*args): # Check that set_sform(), set_qform() are called on the new header. 
parrec2nii.verbose.switch = False - parrec2nii.io_orientation.return_value = [[0, 1],[1, 1],[2, 1]] # LAS+ + parrec2nii.io_orientation.return_value = [[0, 1], [1, 1], [2, 1]] # LAS+ nimg = Mock() nhdr = MagicMock() diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index 4c640e9136..6a1229f72e 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -11,50 +11,50 @@ def test_parse_slice(): assert parse_slice(None) == slice(None) - assert parse_slice("1:5") == slice(1, 5) - assert parse_slice("1:") == slice(1, None) - assert parse_slice(":5") == slice(None, 5) - assert parse_slice(":-1") == slice(None, -1) - assert parse_slice("-5:-1") == slice(-5, -1) - assert parse_slice("1:5:") == slice(1, 5, None) - assert parse_slice("1::") == slice(1, None, None) - assert parse_slice(":5:") == slice(None, 5, None) - assert parse_slice(":-1:") == slice(None, -1, None) - assert parse_slice("-5:-1:") == slice(-5, -1, None) - assert parse_slice("1:5:1") == slice(1, 5, 1) - assert parse_slice("1::1") == slice(1, None, 1) - assert parse_slice(":5:1") == slice(None, 5, 1) - assert parse_slice(":-1:1") == slice(None, -1, 1) - assert parse_slice("-5:-1:1") == slice(-5, -1, 1) - assert parse_slice("5:1:-1") == slice(5, 1, -1) - assert parse_slice(":1:-1") == slice(None, 1, -1) - assert parse_slice("5::-1") == slice(5, None, -1) - assert parse_slice("-1::-1") == slice(-1, None, -1) - assert parse_slice("-1:-5:-1") == slice(-1, -5, -1) + assert parse_slice('1:5') == slice(1, 5) + assert parse_slice('1:') == slice(1, None) + assert parse_slice(':5') == slice(None, 5) + assert parse_slice(':-1') == slice(None, -1) + assert parse_slice('-5:-1') == slice(-5, -1) + assert parse_slice('1:5:') == slice(1, 5, None) + assert parse_slice('1::') == slice(1, None, None) + assert parse_slice(':5:') == slice(None, 5, None) + assert parse_slice(':-1:') == slice(None, -1, None) + assert parse_slice('-5:-1:') == slice(-5, -1, None) + assert parse_slice('1:5:1') == slice(1, 5, 1) + assert parse_slice('1::1') == slice(1, None, 1) + assert parse_slice(':5:1') == slice(None, 5, 1) + assert parse_slice(':-1:1') == slice(None, -1, 1) + assert parse_slice('-5:-1:1') == slice(-5, -1, 1) + assert parse_slice('5:1:-1') == slice(5, 1, -1) + assert parse_slice(':1:-1') == slice(None, 1, -1) + assert parse_slice('5::-1') == slice(5, None, -1) + assert parse_slice('-1::-1') == slice(-1, None, -1) + assert parse_slice('-1:-5:-1') == slice(-1, -5, -1) # Max of start:stop:step with pytest.raises(ValueError): - parse_slice("1:2:3:4") + parse_slice('1:2:3:4') # Integers only with pytest.raises(ValueError): - parse_slice("abc:2:3") + parse_slice('abc:2:3') with pytest.raises(ValueError): - parse_slice("1.2:2:3") + parse_slice('1.2:2:3') # Unit steps only with pytest.raises(ValueError): - parse_slice("1:5:2") + parse_slice('1:5:2') def test_parse_slice_disallow_step(): # Permit steps of 1 - assert parse_slice("1:5", False) == slice(1, 5) - assert parse_slice("1:5:", False) == slice(1, 5) - assert parse_slice("1:5:1", False) == slice(1, 5, 1) + assert parse_slice('1:5', False) == slice(1, 5) + assert parse_slice('1:5:', False) == slice(1, 5) + assert parse_slice('1:5:1', False) == slice(1, 5, 1) # Disable other steps with pytest.raises(ValueError): - parse_slice("1:5:-1", False) + parse_slice('1:5:-1', False) with pytest.raises(ValueError): - parse_slice("1:5:-2", False) + parse_slice('1:5:-2', False) def test_lossless_slice_unknown_axes(): @@ -66,7 +66,7 @@ def 
test_lossless_slice_unknown_axes(): def test_lossless_slice_scaling(tmp_path): fname = tmp_path / 'image.nii' img = nb.Nifti1Image(np.random.uniform(-20000, 20000, (5, 5, 5, 5)), affine=np.eye(4)) - img.header.set_data_dtype("int16") + img.header.set_data_dtype('int16') img.to_filename(fname) img1 = nb.load(fname) sliced_fname = tmp_path / 'sliced.nii' @@ -81,8 +81,9 @@ def test_lossless_slice_scaling(tmp_path): def test_lossless_slice_noscaling(tmp_path): fname = tmp_path / 'image.mgh' - img = nb.MGHImage(np.random.uniform(-20000, 20000, (5, 5, 5, 5)).astype("float32"), - affine=np.eye(4)) + img = nb.MGHImage( + np.random.uniform(-20000, 20000, (5, 5, 5, 5)).astype('float32'), affine=np.eye(4) + ) img.to_filename(fname) img1 = nb.load(fname) sliced_fname = tmp_path / 'sliced.mgh' @@ -95,7 +96,7 @@ def test_lossless_slice_noscaling(tmp_path): assert img1.dataobj.inter == img2.dataobj.inter -@pytest.mark.parametrize("inplace", (True, False)) +@pytest.mark.parametrize('inplace', (True, False)) def test_nib_roi(tmp_path, inplace): in_file = os.path.join(data_path, 'functional.nii') out_file = str(tmp_path / 'sliced.nii') @@ -117,11 +118,14 @@ def test_nib_roi(tmp_path, inplace): assert np.allclose(in_sliced.affine, out_img.affine) -@pytest.mark.parametrize("args, errmsg", ( - (("-i", "1:1"), "Cannot take zero-length slice"), - (("-j", "1::2"), "Downsampling is not supported"), - (("-t", "5::-1"), "Step entry not permitted"), -)) +@pytest.mark.parametrize( + 'args, errmsg', + ( + (('-i', '1:1'), 'Cannot take zero-length slice'), + (('-j', '1::2'), 'Downsampling is not supported'), + (('-t', '5::-1'), 'Step entry not permitted'), + ), +) def test_nib_roi_bad_slices(capsys, args, errmsg): in_file = os.path.join(data_path, 'functional.nii') @@ -133,20 +137,20 @@ def test_nib_roi_bad_slices(capsys, args, errmsg): def test_entrypoint(capsys): # Check that we handle missing args as expected - with mock.patch("sys.argv", ["nib-roi", "--help"]): + with mock.patch('sys.argv', ['nib-roi', '--help']): try: retval = main() except SystemExit: pass else: - assert False, "argparse exits on --help. If changing to another parser, update test." + assert False, 'argparse exits on --help. If changing to another parser, update test.' captured = capsys.readouterr() - assert captured.out.startswith("usage: nib-roi") + assert captured.out.startswith('usage: nib-roi') def test_nib_roi_unknown_axes(capsys): in_file = os.path.join(data_path, 'minc1_4d.mnc') with pytest.raises(ValueError): - main([in_file, os.devnull, "-i", ":"]) + main([in_file, os.devnull, '-i', ':']) captured = capsys.readouterr() - assert "Could not slice image." in captured.out + assert 'Could not slice image.' 
in captured.out diff --git a/nibabel/cmdline/tests/test_stats.py b/nibabel/cmdline/tests/test_stats.py index 1ceac90231..ced289cebb 100644 --- a/nibabel/cmdline/tests/test_stats.py +++ b/nibabel/cmdline/tests/test_stats.py @@ -22,15 +22,15 @@ def test_volume(tmpdir, capsys): mask_data[5:15, 5:15, 5:15] = 1 img = Nifti1Image(mask_data, np.eye(4)) - infile = tmpdir / "input.nii" + infile = tmpdir / 'input.nii' save(img, infile) - args = (f"{infile} --Volume") + args = f'{infile} --Volume' main(args.split()) vol_mm3 = capsys.readouterr() - args = (f"{infile} --Volume --units vox") + args = f'{infile} --Volume --units vox' main(args.split()) vol_vox = capsys.readouterr() assert float(vol_mm3[0]) == 1000.0 - assert int(vol_vox[0]) == 1000 \ No newline at end of file + assert int(vol_vox[0]) == 1000 diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 460f0d40d6..58cab3ba42 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -1,6 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Test scripts +"""Test scripts Test running scripts """ @@ -11,25 +11,32 @@ import numpy as np from nibabel.cmdline.utils import * from nibabel.cmdline.diff import * -from os.path import (join as pjoin) +from os.path import join as pjoin from nibabel.testing import data_path from collections import OrderedDict from io import StringIO def test_table2string(): - assert table2string([["A", "B", "C", "D"], ["E", "F", "G", "H"]]) == "A B C D\nE F G H\n" - assert table2string([["Let's", "Make", "Tests", "And"], ["Have", "Lots", "Of", "Fun"], - ["With", "Python", "Guys", "!"]]) == "Let's Make Tests And\n Have Lots Of Fun"+ \ - "\n With Python Guys !\n" + assert table2string([['A', 'B', 'C', 'D'], ['E', 'F', 'G', 'H']]) == 'A B C D\nE F G H\n' + assert ( + table2string( + [ + ["Let's", 'Make', 'Tests', 'And'], + ['Have', 'Lots', 'Of', 'Fun'], + ['With', 'Python', 'Guys', '!'], + ] + ) + == "Let's Make Tests And\n Have Lots Of Fun" + '\n With Python Guys !\n' + ) def test_ap(): - assert ap([1, 2], "%2d") == " 1, 2" - assert ap([1, 2], "%3d") == " 1, 2" - assert ap([1, 2], "%-2d") == "1 , 2 " - assert ap([1, 2], "%d", "+") == "1+2" - assert ap([1, 2, 3], "%d", "-") == "1-2-3" + assert ap([1, 2], '%2d') == ' 1, 2' + assert ap([1, 2], '%3d') == ' 1, 2' + assert ap([1, 2], '%-2d') == '1 , 2 ' + assert ap([1, 2], '%d', '+') == '1+2' + assert ap([1, 2, 3], '%d', '-') == '1-2-3' def test_safe_get(): @@ -43,76 +50,174 @@ def get_test(self): test = TestObject() test.test = 2 - assert safe_get(test, "test") == 2 - assert safe_get(test, "failtest") == "-" + assert safe_get(test, 'test') == 2 + assert safe_get(test, 'failtest') == '-' def test_get_headers_diff(): - fnames = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] + fnames = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) - expected_difference = OrderedDict([ - ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), - ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), - ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), - np.array([ 4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), 
np.array(16).astype(dtype="uint8")]), - ("pixdim", [np.array([ 1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( - [ -1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, - 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), - ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), - ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), - ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), - ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), - np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), - ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("quatern_b", [np.array(0.0).astype(dtype="float32"), - np.array(-1.9451068140294884e-26).astype(dtype="float32")]), - ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), - ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), - ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), - ("qoffset_y", [np.array(0.0).astype(dtype="float32"), np.array(-35.72294235229492).astype(dtype="float32")]), - ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), - ("srow_x", [np.array([ 1., 0., 0., 0.]).astype(dtype="float32"), - np.array([ -2.00000000e+00, 6.71471565e-19, 9.08102451e-18, - 1.17855103e+02]).astype(dtype="float32")]), - ("srow_y", [np.array([ 0., 3., 0., 0.]).astype(dtype="float32"), - np.array([ -6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype(dtype="float32")]), - ("srow_z", [np.array([ 0., 0., 2., 0.]).astype(dtype="float32"), - np.array([ 8.25548089e-18, 3.23207617e-01, 2.17108178e+00, - -7.24879837e+00]).astype(dtype="float32")])]) + expected_difference = OrderedDict( + [ + ('regular', [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))]), + ( + 'dim_info', + [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + ), + ( + 'dim', + [ + np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + ], + ), + ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), + ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), + ( + 'pixdim', + [ + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array( + [ + -1.00000000e00, + 2.00000000e00, + 2.00000000e00, + 2.19999909e00, + 2.00000000e03, + 1.00000000e00, + 1.00000000e00, + 1.00000000e00, + ] + ).astype(dtype='float32'), + ], + ), + ('slice_end', [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')]), + ( + 'xyzt_units', + [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + ), + ( + 'cal_max', + [ + np.array(0.0).astype(dtype='float32'), + np.asarray(1162.0).astype(dtype='float32'), + ], + ), + ( + 'descrip', + [ + np.array(''.encode('utf-8')).astype(dtype='S80'), + np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( + dtype='S80' + ), + ], + ), + ('qform_code', [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), + 
('sform_code', [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), + ( + 'quatern_b', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-1.9451068140294884e-26).astype(dtype='float32'), + ], + ), + ( + 'quatern_c', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.9967085123062134).astype(dtype='float32'), + ], + ), + ( + 'quatern_d', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.0810687392950058).astype(dtype='float32'), + ], + ), + ( + 'qoffset_x', + [ + np.array(0.0).astype(dtype='float32'), + np.array(117.8551025390625).astype(dtype='float32'), + ], + ), + ( + 'qoffset_y', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-35.72294235229492).astype(dtype='float32'), + ], + ), + ( + 'qoffset_z', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-7.248798370361328).astype(dtype='float32'), + ], + ), + ( + 'srow_x', + [ + np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), + np.array( + [-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02] + ).astype(dtype='float32'), + ], + ), + ( + 'srow_y', + [ + np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), + np.array( + [-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01] + ).astype(dtype='float32'), + ], + ), + ( + 'srow_z', + [ + np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), + np.array( + [8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00] + ).astype(dtype='float32'), + ], + ), + ] + ) np.testing.assert_equal(actual_difference, expected_difference) def test_display_diff(): - bogus_names = ["hellokitty.nii.gz", "privettovarish.nii.gz"] - - dict_values = OrderedDict([ - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]) - ]) - - expected_output = "These files are different.\n" + "Field/File 1:hellokitty.nii.gz" \ - " " \ - "2:privettovarish.nii.gz \n" \ - "datatype " \ - "2 " \ - "4 \n" \ - "bitpix " \ - "8 16" \ - " " \ - "\n" + bogus_names = ['hellokitty.nii.gz', 'privettovarish.nii.gz'] + + dict_values = OrderedDict( + [ + ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), + ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), + ] + ) + + expected_output = ( + 'These files are different.\n' + 'Field/File 1:hellokitty.nii.gz' + ' ' + '2:privettovarish.nii.gz \n' + 'datatype ' + '2 ' + '4 \n' + 'bitpix ' + '8 16' + ' ' + '\n' + ) assert display_diff(bogus_names, dict_values) == expected_output def test_get_data_diff(): # testing for identical files specifically as md5 may vary by computer - test_names = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'standard.nii.gz')] + test_names = [pjoin(data_path, f) for f in ('standard.nii.gz', 'standard.nii.gz')] assert get_data_hash_diff(test_names) == [] # testing the maximum relative and absolute differences' different use cases @@ -123,27 +228,43 @@ def test_get_data_diff(): test_array_5 = np.arange(64).reshape(8, 8) # same shape, 2 files - assert get_data_diff([test_array, test_array_2]) == \ - OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])]) + assert get_data_diff([test_array, test_array_2]) == OrderedDict( + [('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])] + ) # same shape, 3 files - assert get_data_diff([test_array, test_array_2, test_array_3]) == \ - OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)]), - 
OrderedDict([('abs', 2), ('rel', 2.0)])]), - ('DATA(diff 2:)', [None, None, - OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])])]) + assert get_data_diff([test_array, test_array_2, test_array_3]) == OrderedDict( + [ + ( + 'DATA(diff 1:)', + [ + None, + OrderedDict([('abs', 1), ('rel', 2.0)]), + OrderedDict([('abs', 2), ('rel', 2.0)]), + ], + ), + ( + 'DATA(diff 2:)', + [None, None, OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])], + ), + ] + ) # same shape, 2 files, modified maximum abs/rel assert get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2) == OrderedDict() # different shape, 2 files - assert get_data_diff([test_array_2, test_array_4]) == \ - OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}])]) + assert get_data_diff([test_array_2, test_array_4]) == OrderedDict( + [('DATA(diff 1:)', [None, {'CMP': 'incompat'}])] + ) # different shape, 3 files - assert get_data_diff([test_array_4, test_array_5, test_array_2]) == \ - OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), - ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}])]) + assert get_data_diff([test_array_4, test_array_5, test_array_2]) == OrderedDict( + [ + ('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), + ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}]), + ] + ) test_return = get_data_diff([test_array, test_array_2], dtype=np.float32) assert type(test_return['DATA(diff 1:)'][1]['abs']) is np.float32 @@ -157,42 +278,139 @@ def test_get_data_diff(): def test_main(): - test_names = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] - expected_difference = OrderedDict([ - ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), - ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), - ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]), - ("pixdim", [np.array([1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( - [-1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, - 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), - ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), - ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), - ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), - ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), - np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), - ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("quatern_b", [np.array(0.0).astype(dtype="float32"), - np.array(-1.9451068140294884e-26).astype(dtype="float32")]), - ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), - ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), - ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), - ("qoffset_y", [np.array(0.0).astype(dtype="float32"), 
np.array(-35.72294235229492).astype(dtype="float32")]), - ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), - ("srow_x", [np.array([1., 0., 0., 0.]).astype(dtype="float32"), - np.array([-2.00000000e+00, 6.71471565e-19, 9.08102451e-18, - 1.17855103e+02]).astype(dtype="float32")]), - ("srow_y", [np.array([0., 3., 0., 0.]).astype(dtype="float32"), - np.array([-6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype( - dtype="float32")]), - ("srow_z", [np.array([0., 0., 2., 0.]).astype(dtype="float32"), - np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e+00, - -7.24879837e+00]).astype(dtype="float32")]), - ('DATA(md5)', ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'])]) + test_names = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] + expected_difference = OrderedDict( + [ + ('regular', [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))]), + ( + 'dim_info', + [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + ), + ( + 'dim', + [ + np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + ], + ), + ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), + ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), + ( + 'pixdim', + [ + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array( + [ + -1.00000000e00, + 2.00000000e00, + 2.00000000e00, + 2.19999909e00, + 2.00000000e03, + 1.00000000e00, + 1.00000000e00, + 1.00000000e00, + ] + ).astype(dtype='float32'), + ], + ), + ('slice_end', [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')]), + ( + 'xyzt_units', + [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + ), + ( + 'cal_max', + [ + np.array(0.0).astype(dtype='float32'), + np.asarray(1162.0).astype(dtype='float32'), + ], + ), + ( + 'descrip', + [ + np.array(''.encode('utf-8')).astype(dtype='S80'), + np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( + dtype='S80' + ), + ], + ), + ('qform_code', [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), + ('sform_code', [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), + ( + 'quatern_b', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-1.9451068140294884e-26).astype(dtype='float32'), + ], + ), + ( + 'quatern_c', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.9967085123062134).astype(dtype='float32'), + ], + ), + ( + 'quatern_d', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.0810687392950058).astype(dtype='float32'), + ], + ), + ( + 'qoffset_x', + [ + np.array(0.0).astype(dtype='float32'), + np.array(117.8551025390625).astype(dtype='float32'), + ], + ), + ( + 'qoffset_y', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-35.72294235229492).astype(dtype='float32'), + ], + ), + ( + 'qoffset_z', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-7.248798370361328).astype(dtype='float32'), + ], + ), + ( + 'srow_x', + [ + np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), + np.array( + [-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02] + ).astype(dtype='float32'), + ], + ), + ( + 'srow_y', + [ + np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), + np.array( + [-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01] + ).astype(dtype='float32'), + 
], + ), + ( + 'srow_z', + [ + np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), + np.array( + [8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00] + ).astype(dtype='float32'), + ], + ), + ( + 'DATA(md5)', + ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'], + ), + ] + ) with pytest.raises(SystemExit): np.testing.assert_equal(main(test_names, StringIO()), expected_difference) @@ -200,4 +418,4 @@ def test_main(): test_names_2 = [pjoin(data_path, f) for f in ('standard.nii.gz', 'standard.nii.gz')] with pytest.raises(SystemExit): - assert main(test_names_2, StringIO()) == "These files are identical." + assert main(test_names_2, StringIO()) == 'These files are identical.' diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py index bddb58c7b1..cc364af06d 100644 --- a/nibabel/cmdline/trk2tck.py +++ b/nibabel/cmdline/trk2tck.py @@ -9,12 +9,14 @@ def parse_args(): - DESCRIPTION = "Convert tractograms (TRK -> TCK)." + DESCRIPTION = 'Convert tractograms (TRK -> TCK).' parser = argparse.ArgumentParser(description=DESCRIPTION) - parser.add_argument("tractograms", metavar="tractogram", nargs="+", - help="list of tractograms (.trk).") - parser.add_argument("-f", "--force", action="store_true", - help="overwrite existing output files.") + parser.add_argument( + 'tractograms', metavar='tractogram', nargs='+', help='list of tractograms (.trk).' + ) + parser.add_argument( + '-f', '--force', action='store_true', help='overwrite existing output files.' + ) args = parser.parse_args() return args, parser diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index e6aa0a2fb5..41b10d6b31 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -29,8 +29,7 @@ def _err(msg=None): def verbose(thing, msg): - """Print `s` if `thing` is less than the `verbose_level` - """ + """Print `s` if `thing` is less than the `verbose_level`""" # TODO: consider using nibabel's logger if thing <= verbose_level: print(' ' * thing + msg) @@ -56,9 +55,7 @@ def table2string(table, out=None): out = StringIO() # equalize number of elements in each row - nelements_max = \ - len(table) and \ - max(len(x) for x in table) + nelements_max = len(table) and max(len(x) for x in table) for i, table_ in enumerate(table): table[i] += [''] * (nelements_max - len(table_)) @@ -67,11 +64,10 @@ def table2string(table, out=None): atable = np.asarray(table) # eat whole entry while computing width for @w (for wide) markup_strip = re.compile('^@([lrc]|w.*)') - col_width = [max([len(markup_strip.sub('', x)) - for x in column]) for column in atable.T] - string = "" + col_width = [max([len(markup_strip.sub('', x)) for x in column]) for column in atable.T] + string = '' for i, table_ in enumerate(table): - string_ = "" + string_ = '' for j, item in enumerate(table_): item = str(item) if item.startswith('@'): @@ -94,8 +90,7 @@ def table2string(table, out=None): else: raise RuntimeError(f'Should not get here with align={align}') - string_ += "%%%ds%%s%%%ds " \ - % (nspacesl, nspacesr) % ('', item, '') + string_ += '%%%ds%%s%%%ds ' % (nspacesl, nspacesr) % ('', item, '') string += string_.rstrip() + '\n' out.write(string) @@ -114,11 +109,10 @@ def ap(helplist, format_, sep=', '): def safe_get(obj, name): - """A getattr which would return '-' if getattr fails - """ + """A getattr which would return '-' if getattr fails""" try: f = getattr(obj, 'get_' + name) return f() except Exception as e: - verbose(2, f"get_{name}() failed -- {e}") + verbose(2, f'get_{name}() failed -- {e}') return 
'-' diff --git a/nibabel/data.py b/nibabel/data.py index f3773d3241..b29476a2d2 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -2,7 +2,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """ Utilities to find files from NIPY data packages - """ import os from os.path import join as pjoin @@ -14,8 +13,9 @@ from .environment import get_nipy_user_dir, get_nipy_system_dir -DEFAULT_INSTALL_HINT = ('If you have the package, have you set the ' - 'path to the package correctly?') +DEFAULT_INSTALL_HINT = ( + 'If you have the package, have you set the ' 'path to the package correctly?' +) class DataError(Exception): @@ -23,19 +23,20 @@ class DataError(Exception): class BomberError(DataError, AttributeError): - """ Error when trying to access Bomber instance + """Error when trying to access Bomber instance Should be instance of AttributeError to allow Python 3 inspect to do various ``hasattr`` checks without raising an error """ + pass class Datasource: - """ Simple class to add base path to relative path """ + """Simple class to add base path to relative path""" def __init__(self, base_path): - """ Initialize datasource + """Initialize datasource Parameters ---------- @@ -53,7 +54,7 @@ def __init__(self, base_path): self.base_path = base_path def get_filename(self, *path_parts): - """ Prepend base path to `*path_parts` + """Prepend base path to `*path_parts` We make no check whether the returned path exists. @@ -71,36 +72,34 @@ def get_filename(self, *path_parts): return pjoin(self.base_path, *path_parts) def list_files(self, relative=True): - """ Recursively list the files in the data source directory. + """Recursively list the files in the data source directory. - Parameters - ---------- - relative: bool, optional - If True, path returned are relative to the base path of - the data source. + Parameters + ---------- + relative: bool, optional + If True, path returned are relative to the base path of + the data source. - Returns - ------- - file_list: list of strings - List of the paths of all the files in the data source. + Returns + ------- + file_list: list of strings + List of the paths of all the files in the data source. """ out_list = list() for base, dirs, files in os.walk(self.base_path): if relative: - base = base[len(self.base_path) + 1:] + base = base[len(self.base_path) + 1 :] for filename in files: out_list.append(pjoin(base, filename)) return out_list class VersionedDatasource(Datasource): - """ Datasource with version information in config file - - """ + """Datasource with version information in config file""" def __init__(self, base_path, config_filename=None): - """ Initialize versioned datasource + """Initialize versioned datasource We assume that there is a configuration file with version information in datasource directory tree. 
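
[A minimal sketch of the version parsing that the `VersionedDatasource` hunks
around here reformat, assuming a hypothetical config file whose `[DEFAULT]`
section carries a `version` entry — the section name actually used by the
datasource config is not shown in this patch:

    import configparser

    # Hypothetical datasource config; the real file lives somewhere in the
    # datasource directory tree (see the docstring above).
    config = configparser.ConfigParser()
    config.read_string('[DEFAULT]\nversion = 0.3\n')

    version = config.get('DEFAULT', 'version')
    # Keep only the first two components, as the hunk below does with
    # version_parts[0] and version_parts[1].
    major_version, minor_version = (int(p) for p in version.split('.')[:2])
    version_no = float('%d.%d' % (major_version, minor_version))  # -> 0.3

The `%d.%d` float round-trip mirrors the reformatted line in the next hunk.]
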
@@ -136,12 +135,11 @@ def __init__(self, base_path, config_filename=None): version_parts = self.version.split('.') self.major_version = int(version_parts[0]) self.minor_version = int(version_parts[1]) - self.version_no = float('%d.%d' % (self.major_version, - self.minor_version)) + self.version_no = float('%d.%d' % (self.major_version, self.minor_version)) def _cfg_value(fname, section='DATA', value='path'): - """ Utility function to fetch value from config file """ + """Utility function to fetch value from config file""" configp = configparser.ConfigParser() readfiles = configp.read(fname) if not readfiles: @@ -153,7 +151,7 @@ def _cfg_value(fname, section='DATA', value='path'): def get_data_path(): - """ Return specified or guessed locations of NIPY data files + """Return specified or guessed locations of NIPY data files The algorithm is to return paths, extracted from strings, where strings are found in the following order: @@ -217,7 +215,7 @@ def get_data_path(): def find_data_dir(root_dirs, *names): - """ Find relative path given path prefixes to search + """Find relative path given path prefixes to search We raise a DataError if we can't find the relative path @@ -240,12 +238,14 @@ def find_data_dir(root_dirs, *names): pth = pjoin(path, ds_relative) if os.path.isdir(pth): return pth - raise DataError(f'Could not find datasource "{ds_relative}" in ' - f'data path "{os.path.pathsep.join(root_dirs)}"') + raise DataError( + f'Could not find datasource "{ds_relative}" in ' + f'data path "{os.path.pathsep.join(root_dirs)}"' + ) def make_datasource(pkg_def, **kwargs): - """ Return datasource defined by `pkg_def` as found in `data_path` + """Return datasource defined by `pkg_def` as found in `data_path` `data_path` is the only allowed keyword argument. @@ -290,8 +290,7 @@ def make_datasource(pkg_def, **kwargs): try: pth = find_data_dir(data_path, *names) except DataError as e: - pth = [pjoin(this_data_path, *names) - for this_data_path in data_path] + pth = [pjoin(this_data_path, *names) for this_data_path in data_path] pkg_hint = pkg_def.get('install hint', DEFAULT_INSTALL_HINT) msg = f'{e}; Is it possible you have not installed a data package?' if 'name' in pkg_def: @@ -303,21 +302,22 @@ def make_datasource(pkg_def, **kwargs): class Bomber: - """ Class to raise an informative error when used """ + """Class to raise an informative error when used""" def __init__(self, name, msg): self.name = name self.msg = msg def __getattr__(self, attr_name): - """ Raise informative error accessing not-found attributes """ + """Raise informative error accessing not-found attributes""" raise BomberError( f'Trying to access attribute "{attr_name}" of ' - f'non-existent data "{self.name}"\n\n{self.msg}\n') + f'non-existent data "{self.name}"\n\n{self.msg}\n' + ) def datasource_or_bomber(pkg_def, **options): - """ Return a viable datasource or a Bomber + """Return a viable datasource or a Bomber This is to allow module level creation of datasource objects. 
We create the objects, so that, if the data exist, and are the correct @@ -355,5 +355,5 @@ def datasource_or_bomber(pkg_def, **options): pkg_name = pkg_def['name'] else: pkg_name = 'data at ' + unix_relpath - msg = f"{pkg_name} is version {ds.version} but we need version >= {version}\n\n{pkg_hint}" + msg = f'{pkg_name} is version {ds.version} but we need version >= {version}\n\n{pkg_hint}' return Bomber(sys_relpath, DataError(msg)) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index f1c6b663c0..f8df06157b 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -1,4 +1,4 @@ -""" File-based images that have data arrays +"""File-based images that have data arrays The class:`DataObjImage` class defines an image that extends the :class:`FileBasedImage` by adding an array-like object, named ``dataobj``. @@ -15,10 +15,10 @@ class DataobjImage(FileBasedImage): - """ Template class for images that have dataobj data stores""" + """Template class for images that have dataobj data stores""" def __init__(self, dataobj, header=None, extra=None, file_map=None): - """ Initialize dataobj image + """Initialize dataobj image The datobj image is a combination of (dataobj, header), with optional metadata in `extra`, and filename / file-like objects contained in the @@ -38,8 +38,7 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): file_map : mapping, optional mapping giving file information for this image format """ - super(DataobjImage, self).__init__(header=header, extra=extra, - file_map=file_map) + super(DataobjImage, self).__init__(header=header, extra=extra, file_map=file_map) self._dataobj = dataobj self._fdata_cache = None self._data_cache = None @@ -48,13 +47,16 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): def dataobj(self): return self._dataobj - @deprecate_with_version('get_data() is deprecated in favor of get_fdata(),' - ' which has a more predictable return type. To ' - 'obtain get_data() behavior going forward, use ' - 'numpy.asanyarray(img.dataobj).', - '3.0', '5.0') + @deprecate_with_version( + 'get_data() is deprecated in favor of get_fdata(),' + ' which has a more predictable return type. To ' + 'obtain get_data() behavior going forward, use ' + 'numpy.asanyarray(img.dataobj).', + '3.0', + '5.0', + ) def get_data(self, caching='fill'): - """ Return image data from image with any necessary scaling applied + """Return image data from image with any necessary scaling applied .. WARNING:: @@ -203,7 +205,7 @@ def get_data(self, caching='fill'): return data def get_fdata(self, caching='fill', dtype=np.float64): - """ Return floating point image data with necessary scaling applied + """Return floating point image data with necessary scaling applied The image ``dataobj`` property can be an array proxy or an array. An array proxy is an object that knows how to load the image data from @@ -352,17 +354,19 @@ def get_fdata(self, caching='fill', dtype=np.float64): @property def in_memory(self): - """ True when any array data is in memory cache + """True when any array data is in memory cache There are separate caches for `get_data` reads and `get_fdata` reads. This property is True if either of those caches are set. 
""" - return (isinstance(self._dataobj, np.ndarray) or - self._fdata_cache is not None or - self._data_cache is not None) + return ( + isinstance(self._dataobj, np.ndarray) + or self._fdata_cache is not None + or self._data_cache is not None + ) def uncache(self): - """ Delete any cached read of data from proxied data + """Delete any cached read of data from proxied data Remember there are two types of images: @@ -399,7 +403,7 @@ def ndim(self): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Class method to create image from mapping in ``file_map`` + """Class method to create image from mapping in ``file_map`` Parameters ---------- @@ -461,7 +465,6 @@ def from_filename(klass, filename, *, mmap=True, keep_file_open=None): if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") file_map = klass.filespec_to_file_map(filename) - return klass.from_file_map(file_map, mmap=mmap, - keep_file_open=keep_file_open) + return klass.from_file_map(file_map, mmap=mmap, keep_file_open=keep_file_open) load = from_filename diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 576d18b5ce..900c0fcf4d 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -1,4 +1,4 @@ -""" Module to help with deprecating objects and classes +"""Module to help with deprecating objects and classes """ import warnings @@ -8,7 +8,7 @@ class ModuleProxy: - """ Proxy for module that may not yet have been imported + """Proxy for module that may not yet have been imported Parameters ---------- @@ -36,11 +36,11 @@ def __getattr__(self, key): return getattr(mod, key) def __repr__(self): - return f"" + return f'' class FutureWarningMixin: - """ Insert FutureWarning for object creation + """Insert FutureWarning for object creation Examples -------- @@ -55,17 +55,16 @@ class FutureWarningMixin: ... warns[0].message.args[0] "Please, don't use this class" """ + warn_message = 'This class will be removed in future versions' def __init__(self, *args, **kwargs): - warnings.warn(self.warn_message, - FutureWarning, - stacklevel=2) + warnings.warn(self.warn_message, FutureWarning, stacklevel=2) super(FutureWarningMixin, self).__init__(*args, **kwargs) class VisibleDeprecationWarning(UserWarning): - """ Deprecation warning that will be shown by default + """Deprecation warning that will be shown by default Python >= 2.7 does not show standard DeprecationWarnings by default: @@ -73,6 +72,7 @@ class VisibleDeprecationWarning(UserWarning): Use this class for cases where we do want to show deprecations by default. """ + pass diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 031a05e601..7b4ef5221f 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -1,4 +1,4 @@ -""" Class for recording and reporting deprecations +"""Class for recording and reporting deprecations """ import functools @@ -29,16 +29,17 @@ class ExpiredDeprecationError(RuntimeError): - """ Error for expired deprecation + """Error for expired deprecation Error raised when a called function or method has passed out of its deprecation period. 
""" + pass def _ensure_cr(text): - """ Remove trailing whitespace and add carriage return + """Remove trailing whitespace and add carriage return Ensures that `text` always ends with a carriage return """ @@ -46,7 +47,7 @@ def _ensure_cr(text): def _add_dep_doc(old_doc, dep_doc, setup='', cleanup=''): - """ Add deprecation message `dep_doc` to docstring in `old_doc` + """Add deprecation message `dep_doc` to docstring in `old_doc` Parameters ---------- @@ -79,12 +80,13 @@ def _add_dep_doc(old_doc, dep_doc, setup='', cleanup=''): setup_lines = [indent + L for L in setup.splitlines()] dep_lines = [indent + L for L in [''] + dep_doc.splitlines() + ['']] cleanup_lines = [indent + L for L in cleanup.splitlines()] - return '\n'.join(new_lines + dep_lines + setup_lines + - old_lines[next_line:] + cleanup_lines + ['']) + return '\n'.join( + new_lines + dep_lines + setup_lines + old_lines[next_line:] + cleanup_lines + [''] + ) class Deprecator: - """ Class to make decorator marking function or method as deprecated + """Class to make decorator marking function or method as deprecated The decorated function / method will: @@ -109,16 +111,18 @@ class Deprecator: given argument of ``until`` in the ``__call__`` method (see below). """ - def __init__(self, - version_comparator, - warn_class=DeprecationWarning, - error_class=ExpiredDeprecationError): + def __init__( + self, + version_comparator, + warn_class=DeprecationWarning, + error_class=ExpiredDeprecationError, + ): self.version_comparator = version_comparator self.warn_class = warn_class self.error_class = error_class def is_bad_version(self, version_str): - """ Return True if `version_str` is too high + """Return True if `version_str` is too high Tests `version_str` with ``self.version_comparator`` @@ -135,9 +139,8 @@ def is_bad_version(self, version_str): """ return self.version_comparator(version_str) == -1 - def __call__(self, message, since='', until='', - warn_class=None, error_class=None): - """ Return decorator function function for deprecation warning / error + def __call__(self, message, since='', until='', warn_class=None, error_class=None): + """Return decorator function function for deprecation warning / error Parameters ---------- @@ -169,12 +172,13 @@ def __call__(self, message, since='', until='', if since: messages.append('* deprecated from version: ' + since) if until: - messages.append(f"* {'Raises' if self.is_bad_version(until) else 'Will raise'} " - f"{error_class} as of version: {until}") + messages.append( + f"* {'Raises' if self.is_bad_version(until) else 'Will raise'} " + f'{error_class} as of version: {until}' + ) message = '\n'.join(messages) def deprecator(func): - @functools.wraps(func) def deprecated_func(*args, **kwargs): if until and self.is_bad_version(until): diff --git a/nibabel/dft.py b/nibabel/dft.py index 51b6424a84..fd944a2556 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -7,7 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Copyright (C) 2011 Christian Haselgrove -""" DICOM filesystem tools +"""DICOM filesystem tools """ @@ -27,26 +27,26 @@ from .nifti1 import Nifti1Header from nibabel.optpkg import optional_package -pydicom = optional_package("pydicom")[0] +pydicom = optional_package('pydicom')[0] logger = logging.getLogger('nibabel.dft') class DFTError(Exception): - "base class for DFT exceptions" + """base class for DFT exceptions""" class CachingError(DFTError): - "error while caching" + """error while caching""" class VolumeError(DFTError): - "unsupported 
volume parameter" + """unsupported volume parameter""" class InstanceStackError(DFTError): - "bad series of instance numbers" + """bad series of instance numbers""" def __init__(self, series, i, si): self.series = series @@ -59,7 +59,6 @@ def __str__(self): class _Study: - def __init__(self, d): self.uid = d['uid'] self.date = d['date'] @@ -76,7 +75,7 @@ def __getattribute__(self, name): if name == 'series' and val is None: val = [] with DB.readonly_cursor() as c: - c.execute("SELECT * FROM series WHERE study = ?", (self.uid, )) + c.execute('SELECT * FROM series WHERE study = ?', (self.uid,)) cols = [el[0] for el in c.description] for row in c: d = dict(zip(cols, row)) @@ -91,7 +90,6 @@ def patient_name_or_uid(self): class _Series: - def __init__(self, d): self.uid = d['uid'] self.study = d['study'] @@ -112,7 +110,7 @@ def __getattribute__(self, name): FROM storage_instance WHERE series = ? ORDER BY instance_number""" - c.execute(query, (self.uid, )) + c.execute(query, (self.uid,)) cols = [el[0] for el in c.description] for row in c: d = dict(zip(cols, row)) @@ -122,6 +120,7 @@ def __getattribute__(self, name): def as_png(self, index=None, scale_to_slice=True): import PIL.Image + # For compatibility with older versions of PIL that did not # have `frombytes`: if hasattr(PIL.Image, 'frombytes'): @@ -160,8 +159,9 @@ def as_nifti(self): raise VolumeError('unsupported bits allocated') if self.bits_stored != 12: raise VolumeError('unsupported bits stored') - data = numpy.ndarray((len(self.storage_instances), self.rows, - self.columns), dtype=numpy.int16) + data = numpy.ndarray( + (len(self.storage_instances), self.rows, self.columns), dtype=numpy.int16 + ) for (i, si) in enumerate(self.storage_instances): if i + 1 != si.instance_number: raise InstanceStackError(self, i, si) @@ -192,10 +192,12 @@ def as_nifti(self): cosk = pos_n - pos_1 cosk = cosk / numpy.linalg.norm(cosk) - m = ((pdi * cosi[0], pdj * cosj[0], pdk * cosk[0], pos_1[0]), - (pdi * cosi[1], pdj * cosj[1], pdk * cosk[1], pos_1[1]), - (pdi * cosi[2], pdj * cosj[2], pdk * cosk[2], pos_1[2]), - (0, 0, 0, 1)) + m = ( + (pdi * cosi[0], pdj * cosj[0], pdk * cosk[0], pos_1[0]), + (pdi * cosi[1], pdj * cosj[1], pdk * cosk[1], pos_1[1]), + (pdi * cosi[2], pdj * cosj[2], pdk * cosk[2], pos_1[2]), + (0, 0, 0, 1), + ) # Values are python Decimals in pydicom 0.9.7 m = numpy.array(m, dtype=float) @@ -205,8 +207,7 @@ def as_nifti(self): hdr.set_qform(m, 1) hdr.set_xyzt_units(2, 8) hdr.set_data_dtype(numpy.int16) - hdr.set_data_shape((self.columns, self.rows, - len(self.storage_instances))) + hdr.set_data_shape((self.columns, self.rows, len(self.storage_instances))) s = BytesIO() hdr.write_to(s) @@ -218,7 +219,6 @@ def nifti_size(self): class _StorageInstance: - def __init__(self, d): self.uid = d['uid'] self.instance_number = d['instance_number'] @@ -233,7 +233,7 @@ def __getattribute__(self, name): FROM file WHERE storage_instance = ? 
ORDER BY directory, name""" - c.execute(query, (self.uid, )) + c.execute(query, (self.uid,)) val = ['%s/%s' % tuple(row) for row in c] self.files = val return val @@ -262,25 +262,24 @@ def update_cache(base_dir, followlinks=False): os.stat(d) mtimes[d] = os.stat(d).st_mtime with DB.readwrite_cursor() as c: - c.execute("SELECT path, mtime FROM directory") + c.execute('SELECT path, mtime FROM directory') db_mtimes = dict(c) - c.execute("SELECT uid FROM study") + c.execute('SELECT uid FROM study') studies = [row[0] for row in c] - c.execute("SELECT uid FROM series") + c.execute('SELECT uid FROM series') series = [row[0] for row in c] - c.execute("SELECT uid FROM storage_instance") + c.execute('SELECT uid FROM storage_instance') storage_instances = [row[0] for row in c] for dir in sorted(mtimes.keys()): if dir in db_mtimes and mtimes[dir] <= db_mtimes[dir]: continue logger.debug(f'updating {dir}') - _update_dir(c, dir, files_by_dir[dir], studies, series, - storage_instances) + _update_dir(c, dir, files_by_dir[dir], studies, series, storage_instances) if dir in db_mtimes: - query = "UPDATE directory SET mtime = ? WHERE path = ?" + query = 'UPDATE directory SET mtime = ? WHERE path = ?' c.execute(query, (mtimes[dir], dir)) else: - query = "INSERT INTO directory (path, mtime) VALUES (?, ?)" + query = 'INSERT INTO directory (path, mtime) VALUES (?, ?)' c.execute(query, (dir, mtimes[dir])) @@ -289,7 +288,7 @@ def get_studies(base_dir=None, followlinks=False): update_cache(base_dir, followlinks) if base_dir is None: with DB.readonly_cursor() as c: - c.execute("SELECT * FROM study") + c.execute('SELECT * FROM study') studies = [] cols = [el[0] for el in c.description] for row in c: @@ -306,12 +305,12 @@ def get_studies(base_dir=None, followlinks=False): with DB.readonly_cursor() as c: study_uids = {} for dir in _get_subdirs(base_dir, followlinks=followlinks): - c.execute(query, (dir, )) + c.execute(query, (dir,)) for row in c: study_uids[row[0]] = None studies = [] for uid in study_uids: - c.execute("SELECT * FROM study WHERE uid = ?", (uid, )) + c.execute('SELECT * FROM study WHERE uid = ?', (uid,)) cols = [el[0] for el in c.description] d = dict(zip(cols, c.fetchone())) studies.append(_Study(d)) @@ -320,21 +319,19 @@ def get_studies(base_dir=None, followlinks=False): def _update_dir(c, dir, files, studies, series, storage_instances): logger.debug(f'Updating directory {dir}') - c.execute("SELECT name, mtime FROM file WHERE directory = ?", (dir, )) + c.execute('SELECT name, mtime FROM file WHERE directory = ?', (dir,)) db_mtimes = dict(c) for fname in db_mtimes: if fname not in files: logger.debug(f' remove {fname}') - c.execute("DELETE FROM file WHERE directory = ? AND name = ?", - (dir, fname)) + c.execute('DELETE FROM file WHERE directory = ? 
AND name = ?', (dir, fname)) for fname in files: mtime = os.lstat(f'{dir}/{fname}').st_mtime if fname in db_mtimes and mtime <= db_mtimes[fname]: logger.debug(f' okay {fname}') else: logger.debug(f' update {fname}') - si_uid = _update_file(c, dir, fname, studies, series, - storage_instances) + si_uid = _update_file(c, dir, fname, studies, series, storage_instances) if fname not in db_mtimes: query = """INSERT INTO file (directory, name, @@ -371,14 +368,16 @@ def _update_file(c, path, fname, studies, series, storage_instances): patient_birth_date, patient_sex) VALUES (?, ?, ?, ?, ?, ?, ?, ?)""" - params = (str(do.StudyInstanceUID), - do.StudyDate, - do.StudyTime, - study_comments, - str(do.PatientName), - do.PatientID, - do.PatientBirthDate, - do.PatientSex) + params = ( + str(do.StudyInstanceUID), + do.StudyDate, + do.StudyTime, + study_comments, + str(do.PatientName), + do.PatientID, + do.PatientBirthDate, + do.PatientSex, + ) c.execute(query, params) studies.append(str(do.StudyInstanceUID)) if str(do.SeriesInstanceUID) not in series: @@ -391,21 +390,22 @@ def _update_file(c, path, fname, studies, series, storage_instances): bits_allocated, bits_stored) VALUES (?, ?, ?, ?, ?, ?, ?, ?)""" - params = (str(do.SeriesInstanceUID), - str(do.StudyInstanceUID), - do.SeriesNumber, - do.SeriesDescription, - do.Rows, - do.Columns, - do.BitsAllocated, - do.BitsStored) + params = ( + str(do.SeriesInstanceUID), + str(do.StudyInstanceUID), + do.SeriesNumber, + do.SeriesDescription, + do.Rows, + do.Columns, + do.BitsAllocated, + do.BitsStored, + ) c.execute(query, params) series.append(str(do.SeriesInstanceUID)) if str(do.SOPInstanceUID) not in storage_instances: query = """INSERT INTO storage_instance (uid, instance_number, series) VALUES (?, ?, ?)""" - params = (str(do.SOPInstanceUID), do.InstanceNumber, - str(do.SeriesInstanceUID)) + params = (str(do.SOPInstanceUID), do.InstanceNumber, str(do.SeriesInstanceUID)) c.execute(query, params) storage_instances.append(str(do.SOPInstanceUID)) except AttributeError as data: @@ -416,11 +416,11 @@ def _update_file(c, path, fname, studies, series, storage_instances): def clear_cache(): with DB.readwrite_cursor() as c: - c.execute("DELETE FROM file") - c.execute("DELETE FROM directory") - c.execute("DELETE FROM storage_instance") - c.execute("DELETE FROM series") - c.execute("DELETE FROM study") + c.execute('DELETE FROM file') + c.execute('DELETE FROM directory') + c.execute('DELETE FROM storage_instance') + c.execute('DELETE FROM series') + c.execute('DELETE FROM study') CREATE_QUERIES = ( @@ -449,7 +449,8 @@ def clear_cache(): name TEXT NOT NULL, mtime INTEGER NOT NULL, storage_instance TEXT DEFAULT NULL REFERENCES storage_instance, - PRIMARY KEY (directory, name))""") + PRIMARY KEY (directory, name))""", +) class _DB: @@ -473,7 +474,7 @@ def _init_db(self): if self.verbose: logger.info('db filename: ' + self.fname) - self._session = sqlite3.connect(self.fname, isolation_level="EXCLUSIVE") + self._session = sqlite3.connect(self.fname, isolation_level='EXCLUSIVE') with self.readwrite_cursor() as c: c.execute("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'") if c.fetchone()[0] == 0: @@ -482,7 +483,7 @@ def _init_db(self): c.execute(q) def __repr__(self): - return f"" + return f'' @contextlib.contextmanager def readonly_cursor(self): diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 54f600f147..f72a81d5a4 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read ECAT format images +"""Read ECAT format images An ECAT format image consists of: @@ -48,8 +48,7 @@ import numpy as np -from .volumeutils import (native_code, swapped_code, make_dt_codes, - array_from_file) +from .volumeutils import native_code, swapped_code, make_dt_codes, array_from_file from .spatialimages import SpatialImage from .arraywriters import make_array_writer from .wrapstruct import WrapStruct @@ -117,7 +116,7 @@ ('well_counter_corr_factor', np.float32), ('data_units', '32S'), ('septa_state', np.uint16), - ('fill', '12S') + ('fill', '12S'), ] hdr_dtype = np.dtype(main_header_dtd) @@ -183,7 +182,8 @@ ('recon_type', np.uint16), ('recon_views', np.uint16), ('fill', '174S'), - ('fill2', '96S')] + ('fill2', '96S'), +] subhdr_dtype = np.dtype(subheader_dtd) # Ecat Data Types @@ -199,7 +199,8 @@ (4, 'ECAT7_VAXR4', np.float32), (5, 'ECAT7_IEEER4', np.float32), (6, 'ECAT7_SUNI2', np.int16), - (7, 'ECAT7_SUNI4', np.int32)) + (7, 'ECAT7_SUNI4', np.int32), +) data_type_codes = make_dt_codes(_dtdefs) @@ -219,7 +220,8 @@ (11, 'ECAT7_3DSCAN'), (12, 'ECAT7_3DSCAN8'), (13, 'ECAT7_3DNORM'), - (14, 'ECAT7_3DSCANFIT')) + (14, 'ECAT7_3DSCANFIT'), +) file_type_codes = dict(ft_defs) patient_orient_defs = ( # code, description @@ -231,7 +233,8 @@ (5, 'ECAT7_Head_First_Decubitus_Right'), (6, 'ECAT7_Feet_First_Decubitus_Left'), (7, 'ECAT7_Head_First_Decubitus_Left'), - (8, 'ECAT7_Unknown_Orientation')) + (8, 'ECAT7_Unknown_Orientation'), +) patient_orient_codes = dict(patient_orient_defs) # Indexes from the patient_orient_defs structure defined above for the @@ -255,14 +258,12 @@ class EcatHeader(WrapStruct): This just reads the main Ecat Header, it does not load the data or read the mlist or any sub headers """ + template_dtype = hdr_dtype _ft_codes = file_type_codes _patient_orient_codes = patient_orient_codes - def __init__(self, - binaryblock=None, - endianness=None, - check=True): + def __init__(self, binaryblock=None, endianness=None, check=True): """Initialize Ecat header from bytes object Parameters @@ -281,8 +282,7 @@ def __init__(self, @classmethod def guessed_endian(klass, hdr): - """Guess endian from MAGIC NUMBER value of header data - """ + """Guess endian from MAGIC NUMBER value of header data""" if not hdr['sw_version'] == 74: return swapped_code else: @@ -290,8 +290,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): - """ Return header data for empty header with given endianness - """ + """Return header data for empty header with given endianness""" hdr_data = super(EcatHeader, klass).default_structarr(endianness) hdr_data['magic_number'] = 'MATRIX72' hdr_data['sw_version'] = 74 @@ -301,11 +300,11 @@ def default_structarr(klass, endianness=None): return hdr_data def get_data_dtype(self): - """ Get numpy dtype for data from header""" - raise NotImplementedError("dtype is only valid from subheaders") + """Get numpy dtype for data from header""" + raise NotImplementedError('dtype is only valid from subheaders') def get_patient_orient(self): - """ gets orientation of patient based on code stored + """gets orientation of patient based on code stored in header, not always reliable """ code = self._structarr['patient_orientation'].item() @@ -314,7 +313,7 @@ def get_patient_orient(self): return self._patient_orient_codes[code] def get_filetype(self): - """ Type of ECAT Matrix File from code stored in header""" + """Type of ECAT Matrix File from code stored in 
header""" code = self._structarr['file_type'].item() if code not in self._ft_codes: raise KeyError('Ecat Filetype CODE %d not recognized' % code) @@ -322,12 +321,12 @@ def get_filetype(self): @classmethod def _get_checks(klass): - """ Return sequence of check functions for this class """ + """Return sequence of check functions for this class""" return () def read_mlist(fileobj, endianness): - """ read (nframes, 4) matrix list array from `fileobj` + """read (nframes, 4) matrix list array from `fileobj` Parameters ---------- @@ -387,7 +386,7 @@ def read_mlist(fileobj, endianness): mlist = [] return mlist # Use all but first housekeeping row - mlists.append(rows[1:n_rows + 1]) + mlists.append(rows[1 : n_rows + 1]) mlist_index += n_rows if mlist_block_no <= 2: # should block_no in (1, 2) be an error? break @@ -424,9 +423,13 @@ def get_frame_order(mlist): valid_order = np.argsort(ids) if not all(valid_order == sorted(valid_order)): # raise UserWarning if Frames stored out of order - warnings.warn_explicit(f'Frames stored out of order; true order = {valid_order}\n' - 'frames will be accessed in order STORED, NOT true order', - UserWarning, 'ecat', 0) + warnings.warn_explicit( + f'Frames stored out of order; true order = {valid_order}\n' + 'frames will be accessed in order STORED, NOT true order', + UserWarning, + 'ecat', + 0, + ) id_dict = {} for i in range(n_valid): id_dict[i] = [valid_order[i], ids[valid_order[i]]] @@ -434,7 +437,7 @@ def get_frame_order(mlist): def get_series_framenumbers(mlist): - """ Returns framenumber of data as it was collected, + """Returns framenumber of data as it was collected, as part of a series; not just the order of how it was stored in this or across other files @@ -475,7 +478,7 @@ def get_series_framenumbers(mlist): def read_subheaders(fileobj, mlist, endianness): - """ Retrieve all subheaders and return list of subheader recarrays + """Retrieve all subheaders and return list of subheader recarrays Parameters ---------- @@ -535,7 +538,7 @@ def __init__(self, hdr, mlist, fileobj): self.subheaders = read_subheaders(fileobj, mlist, hdr.endianness) def get_shape(self, frame=0): - """ returns shape of given frame""" + """returns shape of given frame""" subhdr = self.subheaders[frame] x = subhdr['x_dimension'].item() y = subhdr['y_dimension'].item() @@ -574,8 +577,7 @@ def get_frame_affine(self, frame=0): # get translations from center of image origin_offset = (np.array(dims) - 1) / 2.0 aff = np.diag(zooms) - aff[:3, -1] = -origin_offset * zooms[:-1] + np.array([x_off, y_off, - z_off]) + aff[:3, -1] = -origin_offset * zooms[:-1] + np.array([x_off, y_off, z_off]) return aff def get_zooms(self, frame=0): @@ -659,7 +661,7 @@ def data_from_fileobj(self, frame=0, orientation=None): class EcatImageArrayProxy: - """ Ecat implementation of array proxy protocol + """Ecat implementation of array proxy protocol The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. @@ -685,7 +687,7 @@ def is_proxy(self): return True def __array__(self, dtype=None): - """ Read of data from file + """Read of data from file This reads ALL FRAMES into one array, can be memory expensive. 
@@ -706,15 +708,13 @@ def __array__(self, dtype=None): data = np.empty(self.shape) frame_mapping = get_frame_order(self._subheader._mlist) for i in sorted(frame_mapping): - data[:, :, :, i] = self._subheader.data_from_fileobj( - frame_mapping[i][0]) + data[:, :, :, i] = self._subheader.data_from_fileobj(frame_mapping[i][0]) if dtype is not None: data = data.astype(dtype, copy=False) return data def __getitem__(self, sliceobj): - """ Return slice `sliceobj` from ECAT data, optimizing if possible - """ + """Return slice `sliceobj` from ECAT data, optimizing if possible""" sliceobj = canonical_slicers(sliceobj, self.shape) # Indices into sliceobj referring to image axes ax_inds = [i for i, obj in enumerate(sliceobj) if obj is not None] @@ -724,7 +724,7 @@ def __getitem__(self, sliceobj): slice3 = sliceobj[ax_inds[3]] # We will load volume by volume. Make slicer into volume by dropping # index over the volume axis - in_slicer = sliceobj[:ax_inds[3]] + sliceobj[ax_inds[3] + 1:] + in_slicer = sliceobj[: ax_inds[3]] + sliceobj[ax_inds[3] + 1 :] # int index for 4th axis, load one slice if isinstance(slice3, Integral): data = self._subheader.data_from_fileobj(frame_mapping[slice3][0]) @@ -738,16 +738,15 @@ def __getitem__(self, sliceobj): in2out_ind = slice2outax(len(self.shape), sliceobj)[3] # Iterate over specified 4th axis indices for i in list(range(self.shape[3]))[slice3]: - data = self._subheader.data_from_fileobj( - frame_mapping[i][0]) + data = self._subheader.data_from_fileobj(frame_mapping[i][0]) out_slicer[in2out_ind] = i out_data[tuple(out_slicer)] = data[in_slicer] return out_data class EcatImage(SpatialImage): - """ Class returns a list of Ecat images, with one image(hdr/data) per frame - """ + """Class returns a list of Ecat images, with one image(hdr/data) per frame""" + _header = EcatHeader header_class = _header valid_exts = ('.v',) @@ -756,10 +755,8 @@ class EcatImage(SpatialImage): ImageArrayProxy = EcatImageArrayProxy - def __init__(self, dataobj, affine, header, - subheader, mlist, - extra=None, file_map=None): - """ Initialize Image + def __init__(self, dataobj, affine, header, subheader, mlist, extra=None, file_map=None): + """Initialize Image The image is a combination of (array, affine matrix, header, subheader, mlist) @@ -824,8 +821,9 @@ def __init__(self, dataobj, affine, header, @property def affine(self): if not self._subheader._check_affines(): - warnings.warn('Affines different across frames, loading affine ' - 'from FIRST frame', UserWarning) + warnings.warn( + 'Affines different across frames, loading affine ' 'from FIRST frame', UserWarning + ) return self._affine def get_frame_affine(self, frame): @@ -854,8 +852,7 @@ def shape(self): return (x, y, z, nframes) def get_mlist(self): - """ get access to the mlist - """ + """get access to the mlist""" return self._mlist def get_subheaders(self): @@ -864,7 +861,7 @@ def get_subheaders(self): @staticmethod def _get_fileholders(file_map): - """ returns files specific to header and image of the image + """returns files specific to header and image of the image for ecat .v this is the same image file Returns @@ -887,7 +884,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # LOAD MLIST mlist = np.zeros((header['num_frames'], 4), dtype=np.int32) mlist_data = read_mlist(hdr_fid, hdr_copy.endianness) - mlist[:len(mlist_data)] = mlist_data + mlist[: len(mlist_data)] = mlist_data # LOAD SUBHEADERS subheaders = klass._subheader(hdr_copy, mlist, hdr_fid) # LOAD DATA @@ -895,11 +892,11 @@ def 
from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): data = klass.ImageArrayProxy(subheaders) # Get affine if not subheaders._check_affines(): - warnings.warn('Affines different across frames, loading affine ' - 'from FIRST frame', UserWarning) + warnings.warn( + 'Affines different across frames, loading affine ' 'from FIRST frame', UserWarning + ) aff = subheaders.get_frame_affine() - img = klass(data, aff, header, subheaders, mlist, - extra=None, file_map=file_map) + img = klass(data, aff, header, subheaders, mlist, extra=None, file_map=file_map) return img def _get_empty_dir(self): @@ -925,11 +922,10 @@ def _write_data(self, data, stream, pos, dtype=None, endianness=None): endianness = native_code stream.seek(pos) - make_array_writer(data.newbyteorder(endianness), - dtype).to_fileobj(stream) + make_array_writer(data.newbyteorder(endianness), dtype).to_fileobj(stream) def to_file_map(self, file_map=None): - """ Write ECAT7 image to `file_map` or contained ``self.file_map`` + """Write ECAT7 image to `file_map` or contained ``self.file_map`` The format consist of: @@ -1014,8 +1010,7 @@ def to_file_map(self, file_map=None): @classmethod def from_image(klass, img): - raise NotImplementedError("Ecat images can only be generated " - "from file objects") + raise NotImplementedError('Ecat images can only be generated ' 'from file objects') @classmethod def load(klass, filespec): diff --git a/nibabel/environment.py b/nibabel/environment.py index 768b4de34b..6f331eed5a 100644 --- a/nibabel/environment.py +++ b/nibabel/environment.py @@ -66,7 +66,7 @@ def get_nipy_user_dir(): def get_nipy_system_dir(): - r""" Get systemwide NIPY configuration file directory + r"""Get systemwide NIPY configuration file directory On posix systems this will be ``/etc/nipy``. On Windows, the directory is less useful, but by default it will be diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index 11a10bbe2b..bb75b54b1e 100644 --- a/nibabel/eulerangles.py +++ b/nibabel/eulerangles.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Module implementing Euler angle rotations and their conversions +"""Module implementing Euler angle rotations and their conversions See: @@ -94,7 +94,7 @@ def euler2mat(z=0, y=0, x=0): - """ Return matrix for rotations around z, y and x axes + """Return matrix for rotations around z, y and x axes Uses the z, then y, then x convention above @@ -170,28 +170,22 @@ def euler2mat(z=0, y=0, x=0): if z: cosz = math.cos(z) sinz = math.sin(z) - Ms.append(np.array([[cosz, -sinz, 0], - [sinz, cosz, 0], - [0, 0, 1]])) + Ms.append(np.array([[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]])) if y: cosy = math.cos(y) siny = math.sin(y) - Ms.append(np.array([[cosy, 0, siny], - [0, 1, 0], - [-siny, 0, cosy]])) + Ms.append(np.array([[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]])) if x: cosx = math.cos(x) sinx = math.sin(x) - Ms.append(np.array([[1, 0, 0], - [0, cosx, -sinx], - [0, sinx, cosx]])) + Ms.append(np.array([[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]])) if Ms: return reduce(np.dot, Ms[::-1]) return np.eye(3) def mat2euler(M, cy_thresh=None): - """ Discover Euler angle vector from 3x3 matrix + """Discover Euler angle vector from 3x3 matrix Uses the conventions above. 
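The ``z, then y, then x`` convention means ``euler2mat`` applies the z rotation first, then y, then x, and ``mat2euler`` inverts that composition away from the gimbal-lock region (``cos(y)`` near zero, guarded by ``cy_thresh``). A short round-trip check, using only the two functions defined in this module::

    import numpy as np
    from nibabel.eulerangles import euler2mat, mat2euler

    z, y, x = 0.3, -0.2, 0.1      # radians; |y| well below pi/2
    M = euler2mat(z, y, x)        # rotate about z, then y, then x
    assert np.allclose(mat2euler(M), (z, y, x))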
@@ -264,7 +258,7 @@ def mat2euler(M, cy_thresh=None): def euler2quat(z=0, y=0, x=0): - """ Return quaternion corresponding to these Euler angles + """Return quaternion corresponding to these Euler angles Uses the z, then y, then x convention above @@ -304,14 +298,18 @@ def euler2quat(z=0, y=0, x=0): sy = math.sin(y) cx = math.cos(x) sx = math.sin(x) - return np.array([cx * cy * cz - sx * sy * sz, - cx * sy * sz + cy * cz * sx, - cx * cz * sy - sx * cy * sz, - cx * cy * sz + sx * cz * sy]) + return np.array( + [ + cx * cy * cz - sx * sy * sz, + cx * sy * sz + cy * cz * sx, + cx * cz * sy - sx * cy * sz, + cx * cy * sz + sx * cz * sy, + ] + ) def quat2euler(q): - """ Return Euler angles corresponding to quaternion `q` + """Return Euler angles corresponding to quaternion `q` Parameters ---------- @@ -336,11 +334,12 @@ def quat2euler(q): """ # delayed import to avoid cyclic dependencies from . import quaternions as nq + return mat2euler(nq.quat2mat(q)) def euler2angle_axis(z=0, y=0, x=0): - """ Return angle, axis corresponding to these Euler angles + """Return angle, axis corresponding to these Euler angles Uses the z, then y, then x convention above @@ -370,11 +369,12 @@ def euler2angle_axis(z=0, y=0, x=0): """ # delayed import to avoid cyclic dependencies from . import quaternions as nq + return nq.quat2angle_axis(euler2quat(z, y, x)) def angle_axis2euler(theta, vector, is_normalized=False): - """ Convert angle, axis pair to Euler angles + """Convert angle, axis pair to Euler angles Parameters ---------- @@ -408,5 +408,6 @@ def angle_axis2euler(theta, vector, is_normalized=False): """ # delayed import to avoid cyclic dependencies from . import quaternions as nq + M = nq.angle_axis2mat(theta, vector, is_normalized) return mat2euler(M) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 17ac3e8180..f74c7b56eb 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -6,14 +6,13 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Common interface for any image format--volume or surface, binary or xml.""" +"""Common interface for any image format--volume or surface, binary or xml.""" import io from copy import deepcopy from urllib import request from .fileholders import FileHolder -from .filename_parser import (types_filenames, TypesFilenamesError, - splitext_addext) +from .filename_parser import types_filenames, TypesFilenamesError, splitext_addext from .openers import ImageOpener @@ -22,7 +21,7 @@ class ImageFileError(Exception): class FileBasedHeader: - """ Template class to implement header protocol """ + """Template class to implement header protocol""" @classmethod def from_header(klass, header=None): @@ -34,8 +33,9 @@ def from_header(klass, header=None): # different field names if type(header) == klass: return header.copy() - raise NotImplementedError("Header class requires a conversion " - f"from {klass} to {type(header)}") + raise NotImplementedError( + 'Header class requires a conversion ' f'from {klass} to {type(header)}' + ) @classmethod def from_fileobj(klass, fileobj): @@ -51,7 +51,7 @@ def __ne__(self, other): return not self == other def copy(self): - """ Copy object to independent representation + """Copy object to independent representation The copy should not be affected by any changes to the original object. @@ -162,6 +162,7 @@ class FileBasedImage: data. The ``file_map`` contents should therefore be such, that this will work. 
""" + header_class = FileBasedHeader _meta_sniff_len = 0 files_types = (('image', None),) @@ -172,7 +173,7 @@ class FileBasedImage: rw = True # Used in test code def __init__(self, header=None, extra=None, file_map=None): - """ Initialize image + """Initialize image The image is a combination of (header), with optional metadata in `extra`, and filename / file-like objects @@ -202,12 +203,11 @@ def header(self): return self._header def __getitem__(self, key): - """ No slicing or dictionary interface for images - """ - raise TypeError("Cannot slice image objects.") + """No slicing or dictionary interface for images""" + raise TypeError('Cannot slice image objects.') def get_filename(self): - """ Fetch the image filename + """Fetch the image filename Parameters ---------- @@ -228,7 +228,7 @@ def get_filename(self): return self.file_map[characteristic_type].filename def set_filename(self, filename): - """ Sets the files in the object from a given filename + """Sets the files in the object from a given filename The different image formats may check whether the filename has an extension characteristic of the format, and raise an error if @@ -255,7 +255,7 @@ def from_file_map(klass, file_map): @classmethod def filespec_to_file_map(klass, filespec): - """ Make `file_map` for this class from filename `filespec` + """Make `file_map` for this class from filename `filespec` Class method @@ -279,18 +279,17 @@ def filespec_to_file_map(klass, filespec): """ try: filenames = types_filenames( - filespec, klass.files_types, - trailing_suffixes=klass._compressed_suffixes) + filespec, klass.files_types, trailing_suffixes=klass._compressed_suffixes + ) except TypesFilenamesError: - raise ImageFileError( - f'Filespec "{filespec}" does not look right for class {klass}') + raise ImageFileError(f'Filespec "{filespec}" does not look right for class {klass}') file_map = {} for key, fname in filenames.items(): file_map[key] = FileHolder(filename=fname) return file_map def to_filename(self, filename, **kwargs): - r""" Write image to files implied by filename string + r"""Write image to files implied by filename string Parameters ---------- @@ -313,7 +312,7 @@ def to_file_map(self, file_map=None, **kwargs): @classmethod def make_file_map(klass, mapping=None): - """ Class method to make files holder for this image type + """Class method to make files holder for this image type Parameters ---------- @@ -346,7 +345,7 @@ def make_file_map(klass, mapping=None): @classmethod def instance_to_filename(klass, img, filename): - """ Save `img` in our own format, to name implied by `filename` + """Save `img` in our own format, to name implied by `filename` This is a class method @@ -362,7 +361,7 @@ def instance_to_filename(klass, img, filename): @classmethod def from_image(klass, img): - """ Class method to create new instance of own class from `img` + """Class method to create new instance of own class from `img` Parameters ---------- @@ -378,7 +377,7 @@ def from_image(klass, img): @classmethod def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): - """ Sniff metadata for image represented by `filename` + """Sniff metadata for image represented by `filename` Parameters ---------- @@ -402,13 +401,11 @@ def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): metadata file, whichever is the shorter. `fname` is the name of the sniffed file. 
""" - froot, ext, trailing = splitext_addext(filename, - klass._compressed_suffixes) + froot, ext, trailing = splitext_addext(filename, klass._compressed_suffixes) # Determine the metadata location t_fnames = types_filenames( - filename, - klass.files_types, - trailing_suffixes=klass._compressed_suffixes) + filename, klass.files_types, trailing_suffixes=klass._compressed_suffixes + ) meta_fname = t_fnames.get('header', filename) # Do not re-sniff if it would be from the same file @@ -425,7 +422,7 @@ def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): @classmethod def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): - """ Return True if `filename` may be image matching this class + """Return True if `filename` may be image matching this class Parameters ---------- @@ -458,8 +455,7 @@ def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): Read bytes content from found metadata. May be None if the file does not appear to have useful metadata. """ - froot, ext, trailing = splitext_addext(filename, - klass._compressed_suffixes) + froot, ext, trailing = splitext_addext(filename, klass._compressed_suffixes) if ext.lower() not in klass.valid_exts: return False, sniff if not hasattr(klass.header_class, 'may_contain_header'): @@ -468,9 +464,7 @@ def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): # Force re-sniff on too-short sniff if sniff is not None and len(sniff[0]) < klass._meta_sniff_len: sniff = None - sniff = klass._sniff_meta_for(filename, - max(klass._meta_sniff_len, sniff_max), - sniff) + sniff = klass._sniff_meta_for(filename, max(klass._meta_sniff_len, sniff_max), sniff) if sniff is None or len(sniff[0]) < klass._meta_sniff_len: return False, sniff return klass.header_class.may_contain_header(sniff[0]), sniff @@ -532,9 +526,7 @@ class SerializableImage(FileBasedImage): def _filemap_from_iobase(klass, io_obj: io.IOBase): """For single-file image types, make a file map with the correct key""" if len(klass.files_types) > 1: - raise NotImplementedError( - "(de)serialization is undefined for multi-file images" - ) + raise NotImplementedError('(de)serialization is undefined for multi-file images') return klass.make_file_map({klass.files_types[0][0]: io_obj}) @classmethod @@ -566,7 +558,7 @@ def to_stream(self, io_obj: io.IOBase, **kwargs): @classmethod def from_bytes(klass, bytestring: bytes): - """ Construct image from a byte string + """Construct image from a byte string Class method @@ -578,7 +570,7 @@ def from_bytes(klass, bytestring: bytes): return klass.from_stream(io.BytesIO(bytestring)) def to_bytes(self, **kwargs) -> bytes: - r""" Return a ``bytes`` object with the contents of the file that would + r"""Return a ``bytes`` object with the contents of the file that would be written if the image were saved. Parameters diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index f7dc9629fd..f2ec992da5 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Fileholder class """ +"""Fileholder class""" from copy import copy @@ -18,14 +18,10 @@ class FileHolderError(Exception): class FileHolder: - """ class to contain filename, fileobj and file position - """ + """class to contain filename, fileobj and file position""" - def __init__(self, - filename=None, - fileobj=None, - pos=0): - """ Initialize FileHolder instance + def __init__(self, filename=None, fileobj=None, pos=0): + """Initialize FileHolder instance Parameters ---------- @@ -43,7 +39,7 @@ def __init__(self, self.pos = pos def get_prepare_fileobj(self, *args, **kwargs): - """ Return fileobj if present, or return fileobj from filename + """Return fileobj if present, or return fileobj from filename Set position to that given in self.pos @@ -75,7 +71,7 @@ def get_prepare_fileobj(self, *args, **kwargs): return obj def same_file_as(self, other): - """ Test if `self` refers to same files / fileobj as `other` + """Test if `self` refers to same files / fileobj as `other` Parameters ---------- @@ -88,18 +84,16 @@ def same_file_as(self, other): True if `other` has the same filename (or both have None) and the same fileobj (or both have None """ - return ((self.filename == other.filename) and - (self.fileobj == other.fileobj)) + return (self.filename == other.filename) and (self.fileobj == other.fileobj) @property def file_like(self): - """ Return ``self.fileobj`` if not None, otherwise ``self.filename`` - """ + """Return ``self.fileobj`` if not None, otherwise ``self.filename``""" return self.fileobj if self.fileobj is not None else self.filename def copy_file_map(file_map): - r""" Copy mapping of fileholders given by `file_map` + r"""Copy mapping of fileholders given by `file_map` Parameters ---------- diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index e254019883..42e89fa721 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Create filename pairs, triplets etc, with expected extensions """ +"""Create filename pairs, triplets etc, with expected extensions""" import os import pathlib @@ -39,18 +39,21 @@ def _stringify_path(filepath_or_buffer): Copied from: https://github.com/pandas-dev/pandas/blob/325dd686de1589c17731cf93b649ed5ccb5a99b4/pandas/io/common.py#L131-L160 """ - if hasattr(filepath_or_buffer, "__fspath__"): + if hasattr(filepath_or_buffer, '__fspath__'): return filepath_or_buffer.__fspath__() elif isinstance(filepath_or_buffer, pathlib.Path): return str(filepath_or_buffer) return filepath_or_buffer -def types_filenames(template_fname, types_exts, - trailing_suffixes=('.gz', '.bz2'), - enforce_extensions=True, - match_case=False): - """ Return filenames with standard extensions from template name +def types_filenames( + template_fname, + types_exts, + trailing_suffixes=('.gz', '.bz2'), + enforce_extensions=True, + match_case=False, +): + """Return filenames with standard extensions from template name The typical case is returning image and header filenames for an Analyze image, that expects an 'image' file type with extension ``.img``, @@ -111,13 +114,12 @@ def types_filenames(template_fname, types_exts, """ template_fname = _stringify_path(template_fname) if not isinstance(template_fname, str): - raise TypesFilenamesError('Need file name as input ' - 'to set_filenames') + raise TypesFilenamesError('Need file name as input ' 'to set_filenames') if template_fname.endswith('.'): template_fname = template_fname[:-1] - filename, found_ext, ignored, guessed_name = \ - parse_filename(template_fname, types_exts, trailing_suffixes, - match_case) + filename, found_ext, ignored, guessed_name = parse_filename( + template_fname, types_exts, trailing_suffixes, match_case + ) # Flag cases where we just set the input name directly direct_set_name = None if enforce_extensions: @@ -128,13 +130,13 @@ def types_filenames(template_fname, types_exts, # an extension, but the wrong one raise TypesFilenamesError( f'File extension "{found_ext}" was not in ' - f'expected list: {[e for t, e in types_exts]}') + f'expected list: {[e for t, e in types_exts]}' + ) elif ignored: # there was no extension, but an ignored suffix # This is a special case like 'test.gz' (where .gz # is ignored). It's confusing to change # this to test.img.gz, or test.gz.img, so error - raise TypesFilenamesError( - f'Confusing ignored suffix {ignored} without extension') + raise TypesFilenamesError(f'Confusing ignored suffix {ignored} without extension') # if we've got to here, we have a guessed name and a found # extension. else: # not enforcing extensions. If there's an extension, we set the @@ -170,10 +172,7 @@ def types_filenames(template_fname, types_exts, return tfns -def parse_filename(filename, - types_exts, - trailing_suffixes, - match_case=False): +def parse_filename(filename, types_exts, trailing_suffixes, match_case=False): """Split filename into fileroot, extension, trailing suffix; guess type. Parameters @@ -252,10 +251,8 @@ def _iendswith(whole, end): return whole.lower().endswith(end.lower()) -def splitext_addext(filename, - addexts=('.gz', '.bz2', '.zst'), - match_case=False): - """ Split ``/pth/fname.ext.gz`` into ``/pth/fname, .ext, .gz`` +def splitext_addext(filename, addexts=('.gz', '.bz2', '.zst'), match_case=False): + """Split ``/pth/fname.ext.gz`` into ``/pth/fname, .ext, .gz`` where ``.gz`` may be any of passed `addext` trailing suffixes. 
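Both helpers reformatted above are easy to exercise. A sketch with hypothetical paths, using the Analyze-style image/header pairing described in the ``types_filenames`` docstring::

    from nibabel.filename_parser import types_filenames, splitext_addext

    # One template name expands to per-type names; the trailing
    # '.gz' suffix carries over to every generated filename.
    fnames = types_filenames('/data/subj1.img.gz',
                             (('image', '.img'), ('header', '.hdr')))
    # {'image': '/data/subj1.img.gz', 'header': '/data/subj1.hdr.gz'}

    # Split root, extension and compression suffix in one call
    froot, ext, trailing = splitext_addext('/data/subj1.nii.gz')
    # ('/data/subj1', '.nii', '.gz')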
diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index cc850132b8..8df199d0d2 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -1,4 +1,4 @@ -""" Utilities for getting array slices out of file-like objects +"""Utilities for getting array slices out of file-like objects """ import operator @@ -13,7 +13,7 @@ # Threshold for memory gap above which we always skip, to save memory # This value came from trying various values and looking at the timing with # ``bench_fileslice`` -SKIP_THRESH = 2 ** 8 +SKIP_THRESH = 2**8 class _NullLock: @@ -25,6 +25,7 @@ class _NullLock: It is used by the ``read_segments`` function in the event that a ``Lock`` is not provided by the caller. """ + def __enter__(self): pass @@ -33,7 +34,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def is_fancy(sliceobj): - """ Returns True if sliceobj is attempting fancy indexing + """Returns True if sliceobj is attempting fancy indexing Parameters ---------- @@ -61,7 +62,7 @@ def is_fancy(sliceobj): def canonical_slicers(sliceobj, shape, check_inds=True): - """ Return canonical version of `sliceobj` for array shape `shape` + """Return canonical version of `sliceobj` for array shape `shape` `sliceobj` is a slicer for an array ``A`` implied by `shape`. @@ -94,7 +95,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True): if not isinstance(sliceobj, tuple): sliceobj = (sliceobj,) if is_fancy(sliceobj): - raise ValueError("Cannot handle fancy indexing") + raise ValueError('Cannot handle fancy indexing') can_slicers = [] n_dim = len(shape) n_real = 0 @@ -103,10 +104,9 @@ def canonical_slicers(sliceobj, shape, check_inds=True): can_slicers.append(None) continue if slicer == Ellipsis: - remaining = sliceobj[i + 1:] + remaining = sliceobj[i + 1 :] if Ellipsis in remaining: - raise ValueError("More than one Ellipsis in slicing " - "expression") + raise ValueError('More than one Ellipsis in slicing ' 'expression') real_remaining = [r for r in remaining if r is not None] n_ellided = n_dim - n_real - len(real_remaining) can_slicers.extend((slice(None),) * n_ellided) @@ -120,8 +120,11 @@ def canonical_slicers(sliceobj, shape, check_inds=True): except TypeError: # should be slice object if slicer != slice(None): # Could this be full slice? 
- if slicer.stop == dim_len and slicer.start in (None, 0) and \ - slicer.step in (None, 1): + if ( + slicer.stop == dim_len + and slicer.start in (None, 0) + and slicer.step in (None, 1) + ): slicer = slice(None) else: if slicer < 0: @@ -136,7 +139,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True): def slice2outax(ndim, sliceobj): - """ Matching output axes for input array ndim `ndim` and slice `sliceobj` + """Matching output axes for input array ndim `ndim` and slice `sliceobj` Parameters ---------- @@ -166,7 +169,7 @@ def slice2outax(ndim, sliceobj): def slice2len(slicer, in_len): - """ Output length after slicing original length `in_len` with `slicer` + """Output length after slicing original length `in_len` with `slicer` Parameters ---------- slicer : slice object @@ -188,8 +191,7 @@ def slice2len(slicer, in_len): def _full_slicer_len(full_slicer): - """ Return length of slicer processed by ``fill_slicer`` - """ + """Return length of slicer processed by ``fill_slicer``""" start, stop, step = full_slicer.start, full_slicer.stop, full_slicer.step if stop is None: # case of negative step stop = -1 @@ -200,7 +202,7 @@ def _full_slicer_len(full_slicer): def fill_slicer(slicer, in_len): - """ Return slice object with Nones filled out to match `in_len` + """Return slice object with Nones filled out to match `in_len` Also fixes too large stop / start values according to slice() slicing rules. @@ -245,7 +247,7 @@ def fill_slicer(slicer, in_len): def predict_shape(sliceobj, in_shape): - """ Predict shape of array from slicing array shape `shape` with `sliceobj` + """Predict shape of array from slicing array shape `shape` with `sliceobj` Parameters ---------- @@ -278,7 +280,7 @@ def predict_shape(sliceobj, in_shape): def _positive_slice(slicer): - """ Return full slice `slicer` enforcing positive step size + """Return full slice `slicer` enforcing positive step size `slicer` assumed full in the sense of :func:`fill_slicer` """ @@ -294,11 +296,8 @@ def _positive_slice(slicer): return slice(end, start + 1, -step) -def threshold_heuristic(slicer, - dim_len, - stride, - skip_thresh=SKIP_THRESH): - """ Whether to force full axis read or contiguous read of stepped slice +def threshold_heuristic(slicer, dim_len, stride, skip_thresh=SKIP_THRESH): + """Whether to force full axis read or contiguous read of stepped slice Allows :func:`fileslice` to sometimes read memory that it will throw away in order to get maximum speed. 
In other words, trade memory for fewer disk @@ -350,9 +349,8 @@ def threshold_heuristic(slicer, return 'full' if gap_size <= skip_thresh else 'contiguous' -def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, - heuristic=threshold_heuristic): - """ Return maybe modified slice and post-slice slicing for `slicer` +def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, heuristic=threshold_heuristic): + """Return maybe modified slice and post-slice slicing for `slicer` Parameters ---------- @@ -428,7 +426,7 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, if action not in ('full', 'contiguous', None): raise ValueError(f'Unexpected return {action} from heuristic') if is_int and action == 'contiguous': - raise ValueError("int index cannot be contiguous") + raise ValueError('int index cannot be contiguous') # If this is the slowest changing dimension, never upgrade None or # contiguous beyond contiguous (we've already covered the already-full # case) @@ -442,8 +440,7 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, if step not in (-1, 1): if step < 0: slicer = _positive_slice(slicer) - return (slice(slicer.start, slicer.stop, 1), - slice(None, None, step)) + return (slice(slicer.start, slicer.stop, 1), slice(None, None, step)) # We only need to be positive if is_int: return slicer, 'dropped' @@ -452,9 +449,8 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, return _positive_slice(slicer), slice(None, None, -1) -def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, - heuristic=threshold_heuristic): - """ Return parameters for slicing array with `sliceobj` given memory layout +def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, heuristic=threshold_heuristic): + """Return parameters for slicing array with `sliceobj` given memory layout Calculate the best combination of skips / (read + discard) to use for reading the data from disk / memory, then generate corresponding @@ -495,7 +491,7 @@ def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, `segments` and reshaping via `read_shape`. Slices are in terms of `read_shape`. 
If empty, no new slicing to apply """ - if order not in "CF": + if order not in 'CF': raise ValueError("order should be one of 'CF'") sliceobj = canonical_slicers(sliceobj, in_shape) # order fastest changing first (record reordering) @@ -505,8 +501,7 @@ def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, # Analyze sliceobj for new read_slicers and fixup post_slicers # read_slicers are the virtual slices; we don't slice with these, but use # the slice definitions to read the relevant memory from disk - read_slicers, post_slicers = optimize_read_slicers( - sliceobj, in_shape, itemsize, heuristic) + read_slicers, post_slicers = optimize_read_slicers(sliceobj, in_shape, itemsize, heuristic) # work out segments corresponding to read_slicers segments = slicers2segments(read_slicers, in_shape, offset, itemsize) # Make post_slicers empty if it is the slicing identity operation @@ -521,7 +516,7 @@ def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, def optimize_read_slicers(sliceobj, in_shape, itemsize, heuristic): - """ Calculates slices to read from disk, and apply after reading + """Calculates slices to read from disk, and apply after reading Parameters ---------- @@ -569,7 +564,8 @@ def optimize_read_slicers(sliceobj, in_shape, itemsize, heuristic): is_last = real_no == len(in_shape) # make modified sliceobj (to_read, post_slice) read_slicer, post_slicer = optimize_slicer( - slicer, dim_len, all_full, is_last, stride, heuristic) + slicer, dim_len, all_full, is_last, stride, heuristic + ) read_slicers.append(read_slicer) all_full = all_full and read_slicer == slice(None) if not isinstance(read_slicer, Integral): @@ -579,7 +575,7 @@ def optimize_read_slicers(sliceobj, in_shape, itemsize, heuristic): def slicers2segments(read_slicers, in_shape, offset, itemsize): - """ Get segments from `read_slicers` given `in_shape` and memory steps + """Get segments from `read_slicers` given `in_shape` and memory steps Parameters ---------- @@ -627,9 +623,7 @@ def slicers2segments(read_slicers, in_shape, offset, itemsize): else: # slice object segments = all_segments all_segments = [] - for i in range(read_slicer.start, - read_slicer.stop, - read_slicer.step): + for i in range(read_slicer.start, read_slicer.stop, read_slicer.step): for s in segments: all_segments.append([s[0] + stride * i, s[1]]) all_full = all_full and is_full @@ -638,7 +632,7 @@ def slicers2segments(read_slicers, in_shape, offset, itemsize): def read_segments(fileobj, segments, n_bytes, lock=None): - """ Read `n_bytes` byte data implied by `segments` from `fileobj` + """Read `n_bytes` byte data implied by `segments` from `fileobj` Parameters ---------- @@ -670,7 +664,7 @@ def read_segments(fileobj, segments, n_bytes, lock=None): if len(segments) == 0: if n_bytes != 0: - raise ValueError("No segments, but non-zero n_bytes") + raise ValueError('No segments, but non-zero n_bytes') return b'' if len(segments) == 1: offset, length = segments[0] @@ -678,7 +672,7 @@ def read_segments(fileobj, segments, n_bytes, lock=None): fileobj.seek(offset) bytes = fileobj.read(length) if len(bytes) != n_bytes: - raise ValueError("Whoops, not enough data in file") + raise ValueError('Whoops, not enough data in file') return bytes # More than one segment bytes = mmap(-1, n_bytes) @@ -687,13 +681,12 @@ def read_segments(fileobj, segments, n_bytes, lock=None): fileobj.seek(offset) bytes.write(fileobj.read(length)) if bytes.tell() != n_bytes: - raise ValueError("Oh dear, n_bytes does not look right") + raise ValueError('Oh dear, n_bytes does 
not look right') return bytes -def _simple_fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', - heuristic=None): - """ Read all data from `fileobj` into array, then slice with `sliceobj` +def _simple_fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', heuristic=None): + """Read all data from `fileobj` into array, then slice with `sliceobj` The simplest possible thing; read all the data into the full array, then slice the full array. @@ -728,9 +721,10 @@ def _simple_fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', return new_arr[sliceobj] -def fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', - heuristic=threshold_heuristic, lock=None): - """ Slice array in `fileobj` using `sliceobj` slicer and array definitions +def fileslice( + fileobj, sliceobj, shape, dtype, offset=0, order='C', heuristic=threshold_heuristic, lock=None +): + """Slice array in `fileobj` using `sliceobj` slicer and array definitions `fileobj` contains the contiguous binary data for an array ``A`` of shape, dtype, memory layout `shape`, `dtype`, `order`, with the binary data @@ -781,19 +775,18 @@ def fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', Array in `fileobj` as sliced with `sliceobj` """ if is_fancy(sliceobj): - raise ValueError("Cannot handle fancy indexing") + raise ValueError('Cannot handle fancy indexing') dtype = np.dtype(dtype) itemsize = int(dtype.itemsize) - segments, sliced_shape, post_slicers = calc_slicedefs( - sliceobj, shape, itemsize, offset, order) + segments, sliced_shape, post_slicers = calc_slicedefs(sliceobj, shape, itemsize, offset, order) n_bytes = reduce(operator.mul, sliced_shape, 1) * itemsize arr_data = read_segments(fileobj, segments, n_bytes, lock) sliced = np.ndarray(sliced_shape, dtype, buffer=arr_data, order=order) return sliced[post_slicers] -def strided_scalar(shape, scalar=0.): - """ Return array shape `shape` where all entries point to value `scalar` +def strided_scalar(shape, scalar=0.0): + """Return array shape `shape` where all entries point to value `scalar` Parameters ---------- diff --git a/nibabel/fileutils.py b/nibabel/fileutils.py index c518cdd921..da44fe51a9 100644 --- a/nibabel/fileutils.py +++ b/nibabel/fileutils.py @@ -6,7 +6,7 @@ # copyright and license terms. # # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Utilities for reading and writing to binary file formats +"""Utilities for reading and writing to binary file formats """ diff --git a/nibabel/freesurfer/__init__.py b/nibabel/freesurfer/__init__.py index a588fb06e5..83c12f8682 100644 --- a/nibabel/freesurfer/__init__.py +++ b/nibabel/freesurfer/__init__.py @@ -1,6 +1,13 @@ """Reading functions for freesurfer files """ -from .io import read_geometry, read_morph_data, write_morph_data, \ - read_annot, read_label, write_geometry, write_annot +from .io import ( + read_geometry, + read_morph_data, + write_morph_data, + read_annot, + read_label, + write_geometry, + write_annot, +) from .mghformat import load, save, MGHImage diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 77f7fe892a..36013c3af2 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -1,4 +1,4 @@ -""" Read / write FreeSurfer geometry, morphometry, label, annotation formats +"""Read / write FreeSurfer geometry, morphometry, label, annotation formats """ import warnings @@ -10,7 +10,7 @@ from ..openers import Opener -_ANNOT_DT = ">i4" +_ANNOT_DT = '>i4' """Data type for Freesurfer `.annot` files. 
Used by :func:`read_annot` and :func:`write_annot`. All data (apart from @@ -31,7 +31,7 @@ def _fread3(fobj): n : int A 3 byte int """ - b1, b2, b3 = np.fromfile(fobj, ">u1", 3) + b1, b2, b3 = np.fromfile(fobj, '>u1', 3) return (b1 << 16) + (b2 << 8) + b3 @@ -48,8 +48,7 @@ def _fread3_many(fobj, n): out : 1D array An array of 3 byte int """ - b1, b2, b3 = np.fromfile(fobj, ">u1", 3 * n).reshape(-1, - 3).astype(int).T + b1, b2, b3 = np.fromfile(fobj, '>u1', 3 * n).reshape(-1, 3).astype(int).T return (b1 << 16) + (b2 << 8) + b3 @@ -60,12 +59,11 @@ def _read_volume_info(fobj): if not np.array_equal(head, [20]): # Read two bytes more head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)]) if not np.array_equal(head, [2, 0, 20]): - warnings.warn("Unknown extension code.") + warnings.warn('Unknown extension code.') return volume_info volume_info['head'] = head - for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', - 'zras', 'cras']: + for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', 'zras', 'cras']: pair = fobj.readline().decode('utf-8').split('=') if pair[0].strip() != key or len(pair) != 2: raise OSError('Error parsing volume info.') @@ -142,12 +140,12 @@ def read_geometry(filepath, read_metadata=False, read_stamp=False): TRIANGLE_MAGIC = 16777214 QUAD_MAGIC = 16777215 NEW_QUAD_MAGIC = 16777213 - with open(filepath, "rb") as fobj: + with open(filepath, 'rb') as fobj: magic = _fread3(fobj) if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file nvert = _fread3(fobj) nquad = _fread3(fobj) - (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.) + (fmt, div) = ('>i2', 100.0) if magic == QUAD_MAGIC else ('>f4', 1.0) coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div coords = coords.reshape(-1, 3) quads = _fread3_many(fobj, nquad * 4) @@ -172,15 +170,15 @@ def read_geometry(filepath, read_metadata=False, read_stamp=False): elif magic == TRIANGLE_MAGIC: # Triangle file create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8') fobj.readline() - vnum = np.fromfile(fobj, ">i4", 1)[0] - fnum = np.fromfile(fobj, ">i4", 1)[0] - coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3) - faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3) + vnum = np.fromfile(fobj, '>i4', 1)[0] + fnum = np.fromfile(fobj, '>i4', 1)[0] + coords = np.fromfile(fobj, '>f4', vnum * 3).reshape(vnum, 3) + faces = np.fromfile(fobj, '>i4', fnum * 3).reshape(fnum, 3) if read_metadata: volume_info = _read_volume_info(fobj) else: - raise ValueError("File does not appear to be a Freesurfer surface") + raise ValueError('File does not appear to be a Freesurfer surface') coords = coords.astype(np.float64) # XXX: due to mayavi bug on mac 32bits @@ -195,8 +193,7 @@ def read_geometry(filepath, read_metadata=False, read_stamp=False): return ret -def write_geometry(filepath, coords, faces, create_stamp=None, - volume_info=None): +def write_geometry(filepath, coords, faces, create_stamp=None, volume_info=None): """Write a triangular format Freesurfer surface mesh. 
Parameters @@ -228,11 +225,11 @@ def write_geometry(filepath, coords, faces, create_stamp=None, magic_bytes = np.array([255, 255, 254], dtype=np.uint8) if create_stamp is None: - create_stamp = f"created by {getpass.getuser()} on {time.ctime()}" + create_stamp = f'created by {getpass.getuser()} on {time.ctime()}' with open(filepath, 'wb') as fobj: magic_bytes.tofile(fobj) - fobj.write((f"{create_stamp}\n\n").encode('utf-8')) + fobj.write((f'{create_stamp}\n\n').encode('utf-8')) np.array([coords.shape[0], faces.shape[0]], dtype='>i4').tofile(fobj) @@ -263,15 +260,15 @@ def read_morph_data(filepath): curv : numpy array Vector representation of surface morphometry values """ - with open(filepath, "rb") as fobj: + with open(filepath, 'rb') as fobj: magic = _fread3(fobj) if magic == 16777215: - vnum = np.fromfile(fobj, ">i4", 3)[0] - curv = np.fromfile(fobj, ">f4", vnum) + vnum = np.fromfile(fobj, '>i4', 3)[0] + curv = np.fromfile(fobj, '>f4', vnum) else: vnum = magic _fread3(fobj) - curv = np.fromfile(fobj, ">i2", vnum) / 100 + curv = np.fromfile(fobj, '>i2', vnum) / 100 return curv @@ -302,13 +299,13 @@ def write_morph_data(file_like, values, fnum=0): vector = np.asarray(values) vnum = np.prod(vector.shape) if vector.shape not in ((vnum,), (vnum, 1), (1, vnum), (vnum, 1, 1)): - raise ValueError("Invalid shape: argument values must be a vector") + raise ValueError('Invalid shape: argument values must be a vector') i4info = np.iinfo('i4') if vnum > i4info.max: - raise ValueError("Too many values for morphometry file") + raise ValueError('Too many values for morphometry file') if not i4info.min <= fnum <= i4info.max: - raise ValueError(f"Argument fnum must be between {i4info.min} and {i4info.max}") + raise ValueError(f'Argument fnum must be between {i4info.min} and {i4info.max}') with Opener(file_like, 'wb') as fobj: fobj.write(magic_bytes) @@ -356,7 +353,7 @@ def read_annot(filepath, orig_ids=False): names : list of bytes The names of the labels. The length of the list is n_labels. """ - with open(filepath, "rb") as fobj: + with open(filepath, 'rb') as fobj: dt = _ANNOT_DT # number of vertices @@ -431,7 +428,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): for i in range(n_entries): # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] + name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] names.append(name) # read RGBT for this entry ctab[i, :4] = np.fromfile(fobj, dt, 4) @@ -475,7 +472,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): ctab = np.zeros((max_index, 5), dt) # orig_tab string length + string length = np.fromfile(fobj, dt, 1)[0] - np.fromfile(fobj, "|S%d" % length, 1)[0] # Orig table path + np.fromfile(fobj, '|S%d' % length, 1)[0] # Orig table path # number of LUT entries present in the file entries_to_read = np.fromfile(fobj, dt, 1)[0] names = list() @@ -484,7 +481,7 @@ idx = np.fromfile(fobj, dt, 1)[0] # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] + name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] names.append(name) # RGBT ctab[idx, :4] = np.fromfile(fobj, dt, 4) @@ -519,7 +516,7 @@ def write_annot(filepath, labels, ctab, names, fill_ctab=True): (n_labels, 4) or (n_labels, 5) - if the latter, the final column is ignored.
""" - with open(filepath, "wb") as fobj: + with open(filepath, 'wb') as fobj: dt = _ANNOT_DT vnum = len(labels) @@ -545,8 +542,7 @@ def write_string(s): clut_labels[np.where(labels == -1)] = 0 # vno, label - data = np.vstack((np.array(range(vnum)), - clut_labels)).T.astype(dt) + data = np.vstack((np.array(range(vnum)), clut_labels)).T.astype(dt) data.tofile(fobj) # tag @@ -598,8 +594,7 @@ def read_label(filepath, read_scalars=False): def _serialize_volume_info(volume_info): """Helper for serializing the volume info.""" - keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', - 'zras', 'cras'] + keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', 'zras', 'cras'] diff = set(volume_info.keys()).difference(keys) if len(diff) > 0: raise ValueError(f'Invalid volume info: {diff.pop()}.') @@ -607,9 +602,11 @@ def _serialize_volume_info(volume_info): strings = list() for key in keys: if key == 'head': - if not (np.array_equal(volume_info[key], [20]) or np.array_equal( - volume_info[key], [2, 0, 20])): - warnings.warn("Unknown extension code.") + if not ( + np.array_equal(volume_info[key], [20]) + or np.array_equal(volume_info[key], [2, 0, 20]) + ): + warnings.warn('Unknown extension code.') strings.append(np.array(volume_info[key], dtype='>i4').tobytes()) elif key in ('valid', 'filename'): val = volume_info[key] @@ -620,5 +617,6 @@ def _serialize_volume_info(volume_info): else: val = volume_info[key] strings.append( - f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode('utf-8')) + f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode('utf-8') + ) return b''.join(strings) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 9d2cdb905b..45881ba313 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Header and image reading / writing functions for MGH image format +"""Header and image reading / writing functions for MGH image format Author: Krish Subramaniam """ @@ -14,8 +14,7 @@ import numpy as np from ..affines import voxel_sizes, from_matvec -from ..volumeutils import (array_to_file, array_from_file, endian_codes, - Recoder) +from ..volumeutils import array_to_file, array_from_file, endian_codes, Recoder from ..filebasedimages import SerializableImage from ..filename_parser import _stringify_path from ..spatialimages import HeaderDataError, SpatialImage @@ -55,20 +54,62 @@ # caveat: Note that it's ambiguous to get the code given the bytespervoxel # caveat 2: Note that the bytespervox you get is in str ( not an int) _dtdefs = ( # code, conversion function, dtype, bytes per voxel - (0, 'uint8', '>u1', '1', 'MRI_UCHAR', np.uint8, np.dtype(np.uint8), - np.dtype(np.uint8).newbyteorder('>')), - (4, 'int16', '>i2', '2', 'MRI_SHORT', np.int16, np.dtype(np.int16), - np.dtype(np.int16).newbyteorder('>')), - (1, 'int32', '>i4', '4', 'MRI_INT', np.int32, np.dtype(np.int32), - np.dtype(np.int32).newbyteorder('>')), - (3, 'float', '>f4', '4', 'MRI_FLOAT', np.float32, np.dtype(np.float32), - np.dtype(np.float32).newbyteorder('>'))) + ( + 0, + 'uint8', + '>u1', + '1', + 'MRI_UCHAR', + np.uint8, + np.dtype(np.uint8), + np.dtype(np.uint8).newbyteorder('>'), + ), + ( + 4, + 'int16', + '>i2', + '2', + 'MRI_SHORT', + np.int16, + np.dtype(np.int16), + np.dtype(np.int16).newbyteorder('>'), + ), + ( + 1, + 'int32', + '>i4', + '4', + 'MRI_INT', + np.int32, + np.dtype(np.int32), + np.dtype(np.int32).newbyteorder('>'), + ), + ( + 3, + 'float', + '>f4', + '4', + 'MRI_FLOAT', + np.float32, + np.dtype(np.float32), + np.dtype(np.float32).newbyteorder('>'), + ), +) # make full code alias bank, including dtype column -data_type_codes = Recoder(_dtdefs, fields=('code', 'label', 'dtype', - 'bytespervox', 'mritype', - 'np_dtype1', 'np_dtype2', - 'numpy_dtype')) +data_type_codes = Recoder( + _dtdefs, + fields=( + 'code', + 'label', + 'dtype', + 'bytespervox', + 'mritype', + 'np_dtype1', + 'np_dtype2', + 'numpy_dtype', + ), +) class MGHError(Exception): @@ -80,21 +121,20 @@ class MGHError(Exception): class MGHHeader(LabeledWrapStruct): - """ Class for MGH format header + """Class for MGH format header The header also consists of the footer data which MGH places after the data chunk. 
""" + # Copies of module-level definitions template_dtype = hf_dtype _hdrdtype = header_dtype _ftrdtype = footer_dtype _data_type_codes = data_type_codes - def __init__(self, - binaryblock=None, - check=True): - """ Initialize header from binary data block + def __init__(self, binaryblock=None, check=True): + """Initialize header from binary data block Parameters ---------- @@ -111,11 +151,8 @@ def __init__(self, # Right zero-pad or truncate binaryblock to appropriate size # Footer is optional and may contain variable-length text fields, # so limit to fixed fields - binaryblock = (binaryblock[:full_size] + - b'\x00' * (full_size - len(binaryblock))) - super(MGHHeader, self).__init__(binaryblock=binaryblock, - endianness='big', - check=False) + binaryblock = binaryblock[:full_size] + b'\x00' * (full_size - len(binaryblock)) + super(MGHHeader, self).__init__(binaryblock=binaryblock, endianness='big', check=False) if not self._structarr['goodRASFlag']: self._set_affine_default() if check: @@ -137,8 +174,7 @@ def _get_checks(klass): @classmethod def from_header(klass, header=None, check=True): - """ Class method to create MGH header from another MGH header - """ + """Class method to create MGH header from another MGH header""" # own type, return copy if type(header) == klass: obj = header.copy() @@ -159,19 +195,19 @@ def from_fileobj(klass, fileobj, check=True): # dimensions from the header, skip over and then read the footer # information hdr_str = fileobj.read(klass._hdrdtype.itemsize) - hdr_str_to_np = np.ndarray(shape=(), dtype=klass._hdrdtype, - buffer=hdr_str) + hdr_str_to_np = np.ndarray(shape=(), dtype=klass._hdrdtype, buffer=hdr_str) if not np.all(hdr_str_to_np['dims']): raise MGHError('Dimensions of the data should be non-zero') tp = int(hdr_str_to_np['type']) - fileobj.seek(DATA_OFFSET + - int(klass._data_type_codes.bytespervox[tp]) * - np.prod(hdr_str_to_np['dims'])) + fileobj.seek( + DATA_OFFSET + + int(klass._data_type_codes.bytespervox[tp]) * np.prod(hdr_str_to_np['dims']) + ) ftr_str = fileobj.read(klass._ftrdtype.itemsize) return klass(hdr_str + ftr_str, check=check) def get_affine(self): - """ Get the affine transform from the header information. + """Get the affine transform from the header information. MGH format doesn't store the transform directly. Instead it's gleaned from the zooms ( delta ), direction cosines ( Mdc ), RAS centers ( @@ -186,29 +222,27 @@ def get_affine(self): get_best_affine = get_affine def get_vox2ras(self): - """return the get_affine() - """ + """return the get_affine()""" return self.get_affine() def get_vox2ras_tkr(self): - """ Get the vox2ras-tkr transform. See "Torig" here: - https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems + """Get the vox2ras-tkr transform. 
See "Torig" here: + https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems """ ds = self._structarr['delta'] ns = self._structarr['dims'][:3] * ds / 2.0 - v2rtkr = np.array([[-ds[0], 0, 0, ns[0]], - [0, 0, ds[2], -ns[2]], - [0, -ds[1], 0, ns[1]], - [0, 0, 0, 1]], dtype=np.float32) + v2rtkr = np.array( + [[-ds[0], 0, 0, ns[0]], [0, 0, ds[2], -ns[2]], [0, -ds[1], 0, ns[1]], [0, 0, 0, 1]], + dtype=np.float32, + ) return v2rtkr def get_ras2vox(self): - """return the inverse get_affine() - """ + """return the inverse get_affine()""" return np.linalg.inv(self.get_affine()) def get_data_dtype(self): - """ Get numpy dtype for MGH data + """Get numpy dtype for MGH data For examples see ``set_data_dtype`` """ @@ -217,8 +251,7 @@ def get_data_dtype(self): return dtype def set_data_dtype(self, datatype): - """ Set numpy dtype for data from code or dtype or type - """ + """Set numpy dtype for data from code or dtype or type""" try: code = self._data_type_codes[datatype] except KeyError: @@ -226,7 +259,7 @@ def set_data_dtype(self, datatype): self._structarr['type'] = code def _ndims(self): - """ Get dimensionality of data + """Get dimensionality of data MGH does not encode dimensionality explicitly, so an image where the fourth dimension is 1 is treated as three-dimensional. @@ -238,7 +271,7 @@ def _ndims(self): return 3 + (self._structarr['dims'][3] > 1) def get_zooms(self): - """ Get zooms from header + """Get zooms from header Returns the spacing of voxels in the x, y, and z dimensions. For four-dimensional files, a fourth zoom is included, equal to the @@ -259,7 +292,7 @@ def get_zooms(self): return tuple(self._structarr['delta']) + tzoom def set_zooms(self, zooms): - """ Set zooms into header fields + """Set zooms into header fields Sets the spacing of voxels in the x, y, and z dimensions. For four-dimensional files, a temporal zoom (repetition time, or TR, in @@ -277,8 +310,9 @@ def set_zooms(self, zooms): if len(zooms) > ndims: raise HeaderDataError('Expecting %d zoom values' % ndims) if np.any(zooms[:3] <= 0): - raise HeaderDataError('Spatial (first three) zooms must be positive; got ' - f'{tuple(zooms[:3])}') + raise HeaderDataError( + 'Spatial (first three) zooms must be positive; got ' f'{tuple(zooms[:3])}' + ) hdr['delta'] = zooms[:3] if len(zooms) == 4: if zooms[3] < 0: @@ -286,8 +320,7 @@ def set_zooms(self, zooms): hdr['tr'] = zooms[3] def get_data_shape(self): - """ Get shape of data - """ + """Get shape of data""" shape = tuple(self._structarr['dims']) # If last dimension (nframes) is 1, remove it because # we want to maintain 3D and it's redundant @@ -296,7 +329,7 @@ def get_data_shape(self): return shape def set_data_shape(self, shape): - """ Set shape of data + """Set shape of data Parameters ---------- @@ -305,34 +338,30 @@ def set_data_shape(self, shape): """ shape = tuple(shape) if len(shape) > 4: - raise ValueError("Shape may be at most 4 dimensional") + raise ValueError('Shape may be at most 4 dimensional') self._structarr['dims'] = shape + (1,) * (4 - len(shape)) self._structarr['delta'] = 1 def get_data_bytespervox(self): - """ Get the number of bytes per voxel of the data - """ - return int(self._data_type_codes.bytespervox[ - int(self._structarr['type'])]) + """Get the number of bytes per voxel of the data""" + return int(self._data_type_codes.bytespervox[int(self._structarr['type'])]) def get_data_size(self): - """ Get the number of bytes the data chunk occupies. 
- """ + """Get the number of bytes the data chunk occupies.""" return self.get_data_bytespervox() * np.prod(self._structarr['dims']) def get_data_offset(self): - """ Return offset into data file to read data - """ + """Return offset into data file to read data""" return DATA_OFFSET def get_footer_offset(self): - """ Return offset where the footer resides. - Occurs immediately after the data chunk. + """Return offset where the footer resides. + Occurs immediately after the data chunk. """ return self.get_data_offset() + self.get_data_size() def data_from_fileobj(self, fileobj): - """ Read data array from `fileobj` + """Read data array from `fileobj` Parameters ---------- @@ -350,25 +379,23 @@ def data_from_fileobj(self, fileobj): return array_from_file(shape, dtype, fileobj, offset) def get_slope_inter(self): - """ MGH format does not do scaling? - """ + """MGH format does not do scaling?""" return None, None @classmethod def guessed_endian(klass, mapping): - """ MGHHeader data must be big-endian """ + """MGHHeader data must be big-endian""" return '>' @classmethod def default_structarr(klass, endianness=None): - """ Return header data for empty header + """Return header data for empty header Ignores byte order; always big endian """ if endianness is not None and endian_codes[endianness] != '>': raise ValueError('MGHHeader must always be big endian') - structarr = super(MGHHeader, - klass).default_structarr(endianness=endianness) + structarr = super(MGHHeader, klass).default_structarr(endianness=endianness) structarr['version'] = 1 structarr['dims'] = 1 structarr['type'] = 3 @@ -378,15 +405,14 @@ def default_structarr(klass, endianness=None): return structarr def _set_affine_default(self): - """ If goodRASFlag is 0, set the default affine - """ + """If goodRASFlag is 0, set the default affine""" self._structarr['goodRASFlag'] = 1 self._structarr['delta'] = 1 self._structarr['Mdc'] = [[-1, 0, 0], [0, 0, 1], [0, -1, 0]] self._structarr['Pxyz_c'] = 0 def writehdr_to(self, fileobj): - """ Write header to fileobj + """Write header to fileobj Write starts at the beginning. @@ -399,14 +425,13 @@ def writehdr_to(self, fileobj): ------- None """ - hdr_nofooter = np.ndarray((), dtype=self._hdrdtype, - buffer=self.binaryblock) + hdr_nofooter = np.ndarray((), dtype=self._hdrdtype, buffer=self.binaryblock) # goto the very beginning of the file-like obj fileobj.seek(0) fileobj.write(hdr_nofooter.tobytes()) def writeftr_to(self, fileobj): - """ Write footer to fileobj + """Write footer to fileobj Footer data is located after the data chunk. So move there and write. @@ -420,17 +445,18 @@ def writeftr_to(self, fileobj): None """ ftr_loc_in_hdr = len(self.binaryblock) - self._ftrdtype.itemsize - ftr_nd = np.ndarray((), dtype=self._ftrdtype, - buffer=self.binaryblock, offset=ftr_loc_in_hdr) + ftr_nd = np.ndarray( + (), dtype=self._ftrdtype, buffer=self.binaryblock, offset=ftr_loc_in_hdr + ) fileobj.seek(self.get_footer_offset()) fileobj.write(ftr_nd.tobytes()) def copy(self): - """ Return copy of structure """ + """Return copy of structure""" return self.__class__(self.binaryblock, check=False) def as_byteswapped(self, endianness=None): - """ Return new object with given ``endianness`` + """Return new object with given ``endianness`` If big endian, returns a copy of the object. Otherwise raises ValueError. 
@@ -447,8 +473,7 @@ def as_byteswapped(self, endianness=None): """ if endianness is None or endian_codes[endianness] != '>': - raise ValueError('Cannot byteswap MGHHeader - ' - 'must always be big endian') + raise ValueError('Cannot byteswap MGHHeader - ' 'must always be big endian') return self.copy() @classmethod @@ -458,13 +483,12 @@ def diagnose_binaryblock(klass, binaryblock, endianness=None): wstr = klass(binaryblock, check=False) battrun = BatteryRunner(klass._get_checks()) reports = battrun.check_only(wstr) - return '\n'.join([report.message - for report in reports if report.message]) + return '\n'.join([report.message for report in reports if report.message]) class MGHImage(SpatialImage, SerializableImage): - """ Class for MGH format image - """ + """Class for MGH format image""" + header_class = MGHHeader valid_exts = ('.mgh', '.mgz') # Register that .mgz extension signals gzip compression @@ -477,13 +501,13 @@ class MGHImage(SpatialImage, SerializableImage): ImageArrayProxy = ArrayProxy - def __init__(self, dataobj, affine, header=None, - extra=None, file_map=None): + def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): shape = dataobj.shape if len(shape) < 3: dataobj = reshape_dataobj(dataobj, shape + (1,) * (3 - len(shape))) - super(MGHImage, self).__init__(dataobj, affine, header=header, - extra=extra, file_map=file_map) + super(MGHImage, self).__init__( + dataobj, affine, header=header, extra=extra, file_map=file_map + ) @classmethod def filespec_to_file_map(klass, filespec): @@ -495,7 +519,7 @@ def filespec_to_file_map(klass, filespec): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Class method to create image from mapping in ``file_map`` + """Class method to create image from mapping in ``file_map`` Parameters ---------- @@ -532,13 +556,14 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): affine = header.get_affine() hdr_copy = header.copy() # Pass original image fileobj / filename to array proxy - data = klass.ImageArrayProxy(img_fh.file_like, hdr_copy, mmap=mmap, - keep_file_open=keep_file_open) + data = klass.ImageArrayProxy( + img_fh.file_like, hdr_copy, mmap=mmap, keep_file_open=keep_file_open + ) img = klass(data, affine, header, file_map=file_map) return img def to_file_map(self, file_map=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Parameters ---------- @@ -559,7 +584,7 @@ def to_file_map(self, file_map=None): self.file_map = file_map def _write_data(self, mghfile, data, header): - """ Utility routine to write image + """Utility routine to write image Parameters ---------- @@ -573,14 +598,13 @@ def _write_data(self, mghfile, data, header): """ shape = header.get_data_shape() if data.shape != shape: - raise HeaderDataError('Data should be shape (%s)' % - ', '.join(str(s) for s in shape)) + raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape)) offset = header.get_data_offset() out_dtype = header.get_data_dtype() array_to_file(data, mghfile, out_dtype, offset) def _affine2header(self): - """ Unconditionally set affine into the header """ + """Unconditionally set affine into the header""" hdr = self._header shape = np.array(self._dataobj.shape[:3]) diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 177688c216..3c47f82031 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -13,8 
+13,15 @@ import numpy as np from numpy.testing import assert_allclose, assert_array_equal -from .. import (read_geometry, read_morph_data, read_annot, read_label, - write_geometry, write_morph_data, write_annot) +from .. import ( + read_geometry, + read_morph_data, + read_annot, + read_label, + write_geometry, + write_morph_data, + write_annot, +) from ..io import _pack_rgb from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data @@ -26,7 +33,7 @@ have_freesurfer = False if 'SUBJECTS_DIR' in os.environ: # May have Freesurfer installed with data - data_path = pjoin(os.environ["SUBJECTS_DIR"], DATA_SDIR) + data_path = pjoin(os.environ['SUBJECTS_DIR'], DATA_SDIR) have_freesurfer = isdir(data_path) else: # May have nibabel test data submodule checked out @@ -35,8 +42,10 @@ data_path = pjoin(nib_data, 'nitest-freesurfer', DATA_SDIR) have_freesurfer = isdir(data_path) -freesurfer_test = unittest.skipUnless(have_freesurfer, - f'cannot find freesurfer {DATA_SDIR} directory') +freesurfer_test = unittest.skipUnless( + have_freesurfer, f'cannot find freesurfer {DATA_SDIR} directory' +) + def _hash_file_content(fname): hasher = hashlib.md5() @@ -49,14 +58,15 @@ def _hash_file_content(fname): @freesurfer_test def test_geometry(): """Test IO of .surf""" - surf_path = pjoin(data_path, "surf", "lh.inflated") + surf_path = pjoin(data_path, 'surf', 'lh.inflated') coords, faces = read_geometry(surf_path) assert 0 == faces.min() assert coords.shape[0] == faces.max() + 1 - surf_path = pjoin(data_path, "surf", "lh.sphere") + surf_path = pjoin(data_path, 'surf', 'lh.sphere') coords, faces, volume_info, create_stamp = read_geometry( - surf_path, read_metadata=True, read_stamp=True) + surf_path, read_metadata=True, read_stamp=True + ) assert 0 == faces.min() assert coords.shape[0] == faces.max() + 1 @@ -68,20 +78,18 @@ def test_geometry(): # with respect to read_geometry() with InTemporaryDirectory(): surf_path = 'test' - create_stamp = f"created by {getpass.getuser()} on {time.ctime()}" - volume_info['cras'] = [1., 2., 3.] 
+ create_stamp = f'created by {getpass.getuser()} on {time.ctime()}' + volume_info['cras'] = [1.0, 2.0, 3.0] write_geometry(surf_path, coords, faces, create_stamp, volume_info) - coords2, faces2, volume_info2 = \ - read_geometry(surf_path, read_metadata=True) + coords2, faces2, volume_info2 = read_geometry(surf_path, read_metadata=True) for key in ('xras', 'yras', 'zras', 'cras'): - assert_allclose(volume_info2[key], volume_info[key], - rtol=1e-7, atol=1e-30) + assert_allclose(volume_info2[key], volume_info[key], rtol=1e-7, atol=1e-30) assert np.array_equal(volume_info2['cras'], volume_info['cras']) with open(surf_path, 'rb') as fobj: - np.fromfile(fobj, ">u1", 3) + np.fromfile(fobj, '>u1', 3) read_create_stamp = fobj.readline().decode().rstrip('\n') # now write an incomplete file @@ -92,7 +100,7 @@ def test_geometry(): assert any('extension code' in str(ww.message) for ww in w) volume_info['head'] = [1, 2] - with pytest.warns(UserWarning, match="Unknown extension"): + with pytest.warns(UserWarning, match='Unknown extension'): write_geometry(surf_path, coords, faces, create_stamp, volume_info) volume_info['a'] = 0 @@ -115,8 +123,9 @@ def test_geometry(): @needs_nibabel_data('nitest-freesurfer') def test_quad_geometry(): """Test IO of freesurfer quad files.""" - new_quad = pjoin(get_nibabel_data(), 'nitest-freesurfer', 'subjects', - 'bert', 'surf', 'lh.inflated.nofix') + new_quad = pjoin( + get_nibabel_data(), 'nitest-freesurfer', 'subjects', 'bert', 'surf', 'lh.inflated.nofix' + ) coords, faces = read_geometry(new_quad) assert 0 == faces.min() assert coords.shape[0] == (faces.max() + 1) @@ -124,14 +133,14 @@ def test_quad_geometry(): new_path = 'test' write_geometry(new_path, coords, faces) coords2, faces2 = read_geometry(new_path) - assert np.array_equal(coords,coords2) + assert np.array_equal(coords, coords2) assert np.array_equal(faces, faces2) @freesurfer_test def test_morph_data(): """Test IO of morphometry data file (eg. curvature).""" - curv_path = pjoin(data_path, "surf", "lh.curv") + curv_path = pjoin(data_path, 'surf', 'lh.curv') curv = read_morph_data(curv_path) assert -1.0 < curv.min() < 0 assert 0 < curv.max() < 1.0 @@ -159,21 +168,22 @@ def test_write_morph_data(): # Windows 32-bit overflows Python int if np.dtype(int) != np.dtype(np.int32): with pytest.raises(ValueError): - write_morph_data('test.curv', strided_scalar((big_num,))) + write_morph_data('test.curv', strided_scalar((big_num,))) for shape in bad_shapes: with pytest.raises(ValueError): write_morph_data('test.curv', values.reshape(shape)) + @freesurfer_test def test_annot(): """Test IO of .annot against freesurfer example data.""" annots = ['aparc', 'aparc.a2005s'] for a in annots: - annot_path = pjoin(data_path, "label", f"lh.{a}.annot") + annot_path = pjoin(data_path, 'label', f'lh.{a}.annot') hash_ = _hash_file_content(annot_path) labels, ctab, names = read_annot(annot_path) - assert labels.shape == (163842, ) + assert labels.shape == (163842,) assert ctab.shape == (len(names), 5) labels_orig = None @@ -186,8 +196,10 @@ def test_annot(): elif hash_ == 'd4f5b7cbc2ed363ac6fcf89e19353504': assert np.sum(labels_orig == 1639705) == 13327 else: - raise RuntimeError("Unknown freesurfer file. Please report " - "the problem to the maintainer of nibabel.") + raise RuntimeError( + 'Unknown freesurfer file. Please report ' + 'the problem to the maintainer of nibabel.' 
+ ) # Test equivalence of freesurfer- and nibabel-generated annot files # with respect to read_annot() @@ -217,8 +229,7 @@ def test_read_write_annot(): # that at least one of each label value is present. Label # values are in the range (0, nlabels-1) - they are used # as indices into the lookup table (generated below). - labels = list(range(nlabels)) + \ - list(np.random.randint(0, nlabels, nvertices - nlabels)) + labels = list(range(nlabels)) + list(np.random.randint(0, nlabels, nvertices - nlabels)) labels = np.array(labels, dtype=np.int32) np.random.shuffle(labels) # Generate some random colours for the LUT @@ -229,9 +240,7 @@ def test_read_write_annot(): # for the annotation value. rgbal[0, 3] = 255 # Generate the annotation values for each LUT entry - rgbal[:, 4] = (rgbal[:, 0] + - rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16)) + rgbal[:, 4] = rgbal[:, 0] + rgbal[:, 1] * (2**8) + rgbal[:, 2] * (2**16) annot_path = 'c.annot' with InTemporaryDirectory(): write_annot(annot_path, labels, rgbal, names, fill_ctab=False) @@ -243,12 +252,11 @@ def test_read_write_annot(): def test_write_annot_fill_ctab(): - """Test the `fill_ctab` parameter to :func:`.write_annot`. """ + """Test the `fill_ctab` parameter to :func:`.write_annot`.""" nvertices = 10 nlabels = 3 names = [f'label {l}' for l in range(1, nlabels + 1)] - labels = list(range(nlabels)) + \ - list(np.random.randint(0, nlabels, nvertices - nlabels)) + labels = list(range(nlabels)) + list(np.random.randint(0, nlabels, nvertices - nlabels)) labels = np.array(labels, dtype=np.int32) np.random.shuffle(labels) rgba = np.array(np.random.randint(0, 255, (nlabels, 4)), dtype=np.int32) @@ -265,8 +273,9 @@ def test_write_annot_fill_ctab(): # values back. badannot = (10 * np.arange(nlabels, dtype=np.int32)).reshape(-1, 1) rgbal = np.hstack((rgba, badannot)) - with pytest.warns(UserWarning, - match=f'Annotation values in {annot_path} will be incorrect'): + with pytest.warns( + UserWarning, match=f'Annotation values in {annot_path} will be incorrect' + ): write_annot(annot_path, labels, rgbal, names, fill_ctab=False) labels2, rgbal2, names2 = read_annot(annot_path, orig_ids=True) names2 = [n.decode('ascii') for n in names2] @@ -276,13 +285,12 @@ def test_write_annot_fill_ctab(): # make sure a warning is *not* emitted if fill_ctab is False, but the # annotation values are correct. 
rgbal = np.hstack((rgba, np.zeros((nlabels, 1), dtype=np.int32))) - rgbal[:, 4] = (rgbal[:, 0] + - rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16)) + rgbal[:, 4] = rgbal[:, 0] + rgbal[:, 1] * (2**8) + rgbal[:, 2] * (2**16) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) - assert all(f'Annotation values in {annot_path} will be incorrect' != str(ww.message) - for ww in w) + assert all( + f'Annotation values in {annot_path} will be incorrect' != str(ww.message) for ww in w + ) labels2, rgbal2, names2 = read_annot(annot_path) names2 = [n.decode('ascii') for n in names2] assert np.all(np.isclose(rgbal2[:, :4], rgba)) @@ -292,6 +300,7 @@ def test_write_annot_fill_ctab(): def test_read_annot_old_format(): """Test reading an old-style .annot file.""" + def gen_old_annot_file(fpath, nverts, labels, rgba, names): dt = '>i' vdata = np.zeros((nverts, 2), dtype=dt) @@ -316,12 +325,14 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names): fbytes += rgba[i, :].astype(dt).tobytes() with open(fpath, 'wb') as f: f.write(fbytes) + with InTemporaryDirectory(): nverts = 10 nlabels = 3 names = [f'Label {l}' for l in range(nlabels)] - labels = np.concatenate(( - np.arange(nlabels), np.random.randint(0, nlabels, nverts - nlabels))) + labels = np.concatenate( + (np.arange(nlabels), np.random.randint(0, nlabels, nverts - nlabels)) + ) np.random.shuffle(labels) rgba = np.random.randint(0, 255, (nlabels, 4)) # write an old .annot file @@ -337,7 +348,7 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names): @freesurfer_test def test_label(): """Test IO of .label""" - label_path = pjoin(data_path, "label", "lh.cortex.label") + label_path = pjoin(data_path, 'label', 'lh.cortex.label') label = read_label(label_path) # XXX : test more assert label.min() >= 0 diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 4c812087c2..29f1687c29 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -36,13 +36,14 @@ MGZ_FNAME = os.path.join(data_path, 'test.mgz') # sample voxel to ras matrix (mri_info --vox2ras) -v2r = np.array([[1, 2, 3, -13], [2, 3, 1, -11.5], - [3, 1, 2, -11.5], [0, 0, 0, 1]], dtype=np.float32) +v2r = np.array( + [[1, 2, 3, -13], [2, 3, 1, -11.5], [3, 1, 2, -11.5], [0, 0, 0, 1]], dtype=np.float32 +) # sample voxel to ras - tkr matrix (mri_info --vox2ras-tkr) -v2rtkr = np.array([[-1.0, 0.0, 0.0, 1.5], - [0.0, 0.0, 1.0, -2.5], - [0.0, -1.0, 0.0, 2.0], - [0.0, 0.0, 0.0, 1.0]], dtype=np.float32) +v2rtkr = np.array( + [[-1.0, 0.0, 0.0, 1.5], [0.0, 0.0, 1.0, -2.5], [0.0, -1.0, 0.0, 2.0], [0.0, 0.0, 0.0, 1.0]], + dtype=np.float32, +) BIG_CODES = ('>', 'big', 'BIG', 'b', 'be', 'B', 'BE') LITTLE_CODES = ('<', 'little', 'l', 'le', 'L', 'LE') @@ -55,7 +56,6 @@ LITTLE_CODES += ('swapped', 's', 'S', '!') - def test_read_mgh(): # test.mgz was generated by the following command # mri_volsynth --dim 3 4 5 2 --vol test.mgz @@ -150,11 +150,7 @@ def test_set_zooms(): assert_array_almost_equal(h.get_zooms(), [1, 1, 1, 2]) h.set_zooms([1, 1, 1, 3]) assert_array_almost_equal(h.get_zooms(), [1, 1, 1, 3]) - for zooms in ((-1, 1, 1, 1), - (1, -1, 1, 1), - (1, 1, -1, 1), - (1, 1, 1, -1), - (1, 1, 1, 1, 5)): + for zooms in ((-1, 1, 1, 1), (1, -1, 1, 1), (1, 1, -1, 1), (1, 1, 1, -1), (1, 1, 1, 1, 5)): with pytest.raises(HeaderDataError): h.set_zooms(zooms) # smoke test for tr=0 @@ -162,7 +158,7 @@ def test_set_zooms(): def bad_dtype_mgh(): - """ This function raises 
an MGHError exception because + """This function raises an MGHError exception because uint16 is not a valid MGH datatype. """ # try to write an unsigned short and make sure it @@ -209,11 +205,15 @@ def test_header_updating(): mgz = load(MGZ_FNAME) hdr = mgz.header # Test against mri_info output - exp_aff = np.loadtxt(io.BytesIO(b""" + exp_aff = np.loadtxt( + io.BytesIO( + b""" 1.0000 2.0000 3.0000 -13.0000 2.0000 3.0000 1.0000 -11.5000 3.0000 1.0000 2.0000 -11.5000 - 0.0000 0.0000 0.0000 1.0000""")) + 0.0000 0.0000 0.0000 1.0000""" + ) + ) assert_almost_equal(mgz.affine, exp_aff, 6) assert_almost_equal(hdr.get_affine(), exp_aff, 6) # Test that initial wonky header elements have not changed @@ -224,7 +224,7 @@ def test_header_updating(): mgz2 = _mgh_rt(mgz, img_fobj) hdr2 = mgz2.header assert_almost_equal(hdr2.get_affine(), exp_aff, 6) - assert_array_equal(hdr2['delta'],1) + assert_array_equal(hdr2['delta'], 1) # Change affine, change underlying header info exp_aff_d = exp_aff.copy() exp_aff_d[0, -1] = -14 @@ -233,14 +233,14 @@ def test_header_updating(): mgz2.update_header() assert_almost_equal(hdr2.get_affine(), exp_aff_d, 6) RZS = exp_aff_d[:3, :3] - assert_almost_equal(hdr2['delta'], np.sqrt(np.sum(RZS ** 2, axis=0))) + assert_almost_equal(hdr2['delta'], np.sqrt(np.sum(RZS**2, axis=0))) assert_almost_equal(hdr2['Mdc'].T, RZS / hdr2['delta']) def test_cosine_order(): # Test we are interpreting the cosine order right data = np.arange(60).reshape((3, 4, 5)).astype(np.int32) - aff = np.diag([2., 3, 4, 1]) + aff = np.diag([2.0, 3, 4, 1]) aff[0] = [2, 1, 0, 10] img = MGHImage(data, aff) assert_almost_equal(img.affine, aff, 6) @@ -248,7 +248,7 @@ def test_cosine_order(): img2 = _mgh_rt(img, img_fobj) hdr2 = img2.header RZS = aff[:3, :3] - zooms = np.sqrt(np.sum(RZS ** 2, axis=0)) + zooms = np.sqrt(np.sum(RZS**2, axis=0)) assert_almost_equal(hdr2['Mdc'].T, RZS / zooms) assert_almost_equal(hdr2['delta'], zooms) @@ -259,7 +259,7 @@ def test_eq(): hdr2 = MGHHeader() assert hdr == hdr2 hdr.set_data_shape((2, 3, 4)) - assert(hdr != hdr2) + assert hdr != hdr2 hdr2.set_data_shape((2, 3, 4)) assert hdr == hdr2 @@ -286,7 +286,7 @@ def test_mgh_load_fileobj(): bio = io.BytesIO(contents) fm = MGHImage.make_file_map(mapping=dict(image=bio)) img2 = MGHImage.from_file_map(fm) - assert(img2.dataobj.file_like is bio) + assert img2.dataobj.file_like is bio assert_array_equal(img.get_fdata(), img2.get_fdata()) @@ -340,8 +340,8 @@ def test_mghheader_default_structarr(): class TestMGHImage(tsi.TestSpatialImage, tsi.MmapImageMixin): - """ Apply general image tests to MGHImage - """ + """Apply general image tests to MGHImage""" + image_class = MGHImage can_save = True @@ -419,7 +419,7 @@ def test_bytes(self): # Short binaryblocks give errors (here set through init) # Long binaryblocks are truncated with pytest.raises(WrapStructError): - self.header_class(bb[:self.header_class._hdrdtype.itemsize - 1]) + self.header_class(bb[: self.header_class._hdrdtype.itemsize - 1]) # Checking set to true by default, and prevents nonsense being # set into the header. 
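
test_header_updating and test_cosine_order above both rely on the same decomposition: the 3x3 block of a vox2ras affine factors into direction cosines ('Mdc', stored transposed) scaled by the column norms ('delta'). A self-contained sketch with NumPy only, reusing the v2r matrix defined at the top of this test module:

    import numpy as np

    v2r = np.array([[1, 2, 3, -13],
                    [2, 3, 1, -11.5],
                    [3, 1, 2, -11.5],
                    [0, 0, 0, 1]], dtype=np.float32)
    RZS = v2r[:3, :3]
    delta = np.sqrt(np.sum(RZS**2, axis=0))   # voxel sizes, one per column
    Mdc_T = RZS / delta                       # unit direction cosines
    np.testing.assert_allclose(Mdc_T * delta, RZS, rtol=1e-6)
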
@@ -440,7 +440,7 @@ def test_as_byteswapped(self): # same code just returns a copy for endianness in BIG_CODES: hdr2 = hdr.as_byteswapped(endianness) - assert(hdr2 is not hdr) + assert hdr2 is not hdr assert hdr2 == hdr # Different code raises error diff --git a/nibabel/funcs.py b/nibabel/funcs.py index e5db0477b0..02b9e3ecd7 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -1,4 +1,3 @@ - # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## @@ -7,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Processor functions for images """ +"""Processor functions for images""" import numpy as np from .orientations import io_orientation, OrientationError @@ -15,7 +14,7 @@ def squeeze_image(img): - """ Return image, remove axes length 1 at end of image shape + """Return image, remove axes length 1 at end of image shape For example, an image may have shape (10,20,30,1,1). In this case squeeze will result in an image with shape (10,20,30). See doctests @@ -80,14 +79,11 @@ def squeeze_image(img): return klass.from_image(img) shape = shape[:slen] data = np.asanyarray(img.dataobj).reshape(shape) - return klass(data, - img.affine, - img.header, - img.extra) + return klass(data, img.affine, img.header, img.extra) def concat_images(images, check_affines=True, axis=None): - r""" Concatenate images in list to single image, along specified dimension + r"""Concatenate images in list to single image, along specified dimension Parameters ---------- @@ -108,11 +104,10 @@ def concat_images(images, check_affines=True, axis=None): New image resulting from concatenating `images` across last dimension """ - images = [load(img) if not hasattr(img, 'get_data') - else img for img in images] + images = [load(img) if not hasattr(img, 'get_data') else img for img in images] n_imgs = len(images) if n_imgs == 0: - raise ValueError("Cannot concatenate an empty list of images.") + raise ValueError('Cannot concatenate an empty list of images.') img0 = images[0] affine = img0.affine header = img0.header @@ -121,7 +116,7 @@ def concat_images(images, check_affines=True, axis=None): n_dim = len(shape0) if axis is None: # collect images in output array for efficiency - out_shape = (n_imgs, ) + shape0 + out_shape = (n_imgs,) + shape0 out_data = np.empty(out_shape) else: # collect images in list for use with np.concatenate @@ -135,8 +130,10 @@ def concat_images(images, check_affines=True, axis=None): if len(img.shape) != n_dim: raise ValueError(f'Image {i} has {len(img.shape)} dimensions, image 0 has {n_dim}') if not np.all(np.array(img.shape)[idx_mask] == masked_shape): - raise ValueError(f'shape {img.shape} for image {i} not compatible with ' - f'first image shape {shape0} with axis == {axis}') + raise ValueError( + f'shape {img.shape} for image {i} not compatible with ' + f'first image shape {shape0} with axis == {axis}' + ) if check_affines and not np.all(img.affine == affine): raise ValueError(f'Affine for image {i} does not match affine for first image') # Do not fill cache in image if it is empty @@ -151,7 +148,7 @@ def concat_images(images, check_affines=True, axis=None): def four_to_three(img): - """ Create 3D images from 4D image by slicing over last axis + """Create 3D images from 4D image by slicing over last axis Parameters ---------- @@ -180,7 +177,7 @@ def four_to_three(img): def 
as_closest_canonical(img, enforce_diag=False): - """ Return `img` with data reordered to be closest to canonical + """Return `img` with data reordered to be closest to canonical Canonical order is the ordering of the output axes. @@ -212,6 +209,6 @@ def as_closest_canonical(img, enforce_diag=False): def _aff_is_diag(aff): - """ Utility function returning True if affine is nearly diagonal """ + """Utility function returning True if affine is nearly diagonal""" rzs_aff = aff[:3, :3] return np.allclose(rzs_aff, np.diag(np.diag(rzs_aff))) diff --git a/nibabel/gifti/__init__.py b/nibabel/gifti/__init__.py index 54bfbd0ffa..2faaf5ab57 100644 --- a/nibabel/gifti/__init__.py +++ b/nibabel/gifti/__init__.py @@ -17,5 +17,12 @@ gifti """ -from .gifti import (GiftiMetaData, GiftiNVPairs, GiftiLabelTable, GiftiLabel, - GiftiCoordSystem, GiftiDataArray, GiftiImage) +from .gifti import ( + GiftiMetaData, + GiftiNVPairs, + GiftiLabelTable, + GiftiLabel, + GiftiCoordSystem, + GiftiDataArray, + GiftiImage, +) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 31df1d813e..8f5efa8ad8 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Classes defining Gifti objects +"""Classes defining Gifti objects The Gifti specification was (at time of writing) available as a PDF download from http://www.nitrc.org/projects/gifti/ @@ -21,19 +21,16 @@ from ..filebasedimages import SerializableImage from ..nifti1 import data_type_codes, xform_codes, intent_codes from ..caret import CaretMetaData -from .util import (array_index_order_codes, gifti_encoding_codes, - gifti_endian_codes, KIND2FMT) +from .util import array_index_order_codes, gifti_encoding_codes, gifti_endian_codes, KIND2FMT from ..deprecated import deprecate_with_version class _GiftiMDList(list): """List view of GiftiMetaData object that will translate most operations""" + def __init__(self, metadata): self._md = metadata - super().__init__( - GiftiNVPairs._private_init(k, v, metadata) - for k, v in metadata.items() - ) + super().__init__(GiftiNVPairs._private_init(k, v, metadata) for k, v in metadata.items()) def append(self, nvpair): self._md[nvpair.name] = nvpair.value @@ -63,12 +60,11 @@ def remove(self, nvpair): class GiftiMetaData(CaretMetaData): - """ A sequence of GiftiNVPairs containing metadata for a gifti data array - """ + """A sequence of GiftiNVPairs containing metadata for a gifti data array""" @staticmethod def _sanitize(args, kwargs): - """ Sanitize and warn on deprecated arguments + """Sanitize and warn on deprecated arguments Accept nvpair positional/keyword argument that is a single ``GiftiNVPairs`` object. @@ -94,31 +90,34 @@ def _sanitize(args, kwargs): # Positional arg dep_init |= not kwargs and len(args) == 1 and isinstance(args[0], GiftiNVPairs) # Keyword arg - dep_init |= not args and list(kwargs) == ["nvpair"] + dep_init |= not args and list(kwargs) == ['nvpair'] if not dep_init: return args, kwargs warnings.warn( - "GiftiMetaData now has a dict-like interface. " - "See ``pydoc dict`` for initialization options. " - "Passing ``GiftiNVPairs()`` or using the ``nvpair`` " - "keyword will fail or behave unexpectedly in NiBabel 6.0.", - FutureWarning, stacklevel=3) - pair = args[0] if args else kwargs.get("nvpair") + 'GiftiMetaData now has a dict-like interface. ' + 'See ``pydoc dict`` for initialization options. 
' + 'Passing ``GiftiNVPairs()`` or using the ``nvpair`` ' + 'keyword will fail or behave unexpectedly in NiBabel 6.0.', + FutureWarning, + stacklevel=3, + ) + pair = args[0] if args else kwargs.get('nvpair') return (), {pair.name: pair.value} @property @deprecate_with_version( - 'The data attribute is deprecated. Use GiftiMetaData object ' - 'directly as a dict.', - '4.0', '6.0') + 'The data attribute is deprecated. Use GiftiMetaData object ' 'directly as a dict.', + '4.0', + '6.0', + ) def data(self): return _GiftiMDList(self) @classmethod @deprecate_with_version( - 'from_dict class method deprecated. Use GiftiMetaData directly.', - '4.0', '6.0') + 'from_dict class method deprecated. Use GiftiMetaData directly.', '4.0', '6.0' + ) def from_dict(klass, data_dict): return klass(data_dict) @@ -126,9 +125,11 @@ def from_dict(klass, data_dict): @deprecate_with_version( 'metadata property deprecated. Use GiftiMetaData object ' 'as dict or pass to dict() for a standard dictionary.', - '4.0', '6.0') + '4.0', + '6.0', + ) def metadata(self): - """ Returns metadata as dictionary """ + """Returns metadata as dictionary""" return dict(self) def print_summary(self): @@ -136,17 +137,19 @@ def print_summary(self): class GiftiNVPairs: - """ Gifti name / value pairs + """Gifti name / value pairs Attributes ---------- name : str value : str """ + @deprecate_with_version( - 'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object ' - 'as a dict, instead.', - '4.0', '6.0') + 'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object ' 'as a dict, instead.', + '4.0', + '6.0', + ) def __init__(self, name='', value=''): self._name = name self._value = value @@ -188,7 +191,7 @@ def value(self, val): class GiftiLabelTable(xml.XmlSerializable): - """ Gifti label table: a sequence of key, label pairs + """Gifti label table: a sequence of key, label pairs From the gifti spec dated 2011-01-14: The label table is used by DataArrays whose values are an key into the @@ -200,7 +203,7 @@ def __init__(self): self.labels = [] def __repr__(self): - return f"" + return f'' def get_labels_as_dict(self): self.labels_as_dict = {} @@ -224,7 +227,7 @@ def print_summary(self): class GiftiLabel(xml.XmlSerializable): - """ Gifti label: association of integer key with optional RGBA values + """Gifti label: association of integer key with optional RGBA values Quotes are from the gifti spec dated 2011-01-14. @@ -261,20 +264,18 @@ def __init__(self, key=0, red=None, green=None, blue=None, alpha=None): self.alpha = alpha def __repr__(self): - chars = 255 * np.array( - [self.red or 0, self.green or 0, self.blue or 0, self.alpha or 0] - ) + chars = 255 * np.array([self.red or 0, self.green or 0, self.blue or 0, self.alpha or 0]) r, g, b, a = chars.astype('u1') return f'' @property def rgba(self): - """ Returns RGBA as tuple """ + """Returns RGBA as tuple""" return (self.red, self.green, self.blue, self.alpha) @rgba.setter def rgba(self, rgba): - """ Set RGBA via sequence + """Set RGBA via sequence Parameters ---------- @@ -296,7 +297,7 @@ def _arr2txt(arr, elem_fmt): class GiftiCoordSystem(xml.XmlSerializable): - """ Gifti coordinate system transform matrix + """Gifti coordinate system transform matrix Quotes are from the gifti spec dated 2011-01-14. 
@@ -345,7 +346,7 @@ def __init__(self, dataspace=0, xformspace=0, xform=None): def __repr__(self): src = xform_codes.label[self.dataspace] dst = xform_codes.label[self.xformspace] - return f"" + return f'' def _to_xml_element(self): coord_xform = xml.Element('CoordinateSystemTransformMatrix') @@ -365,9 +366,9 @@ def print_summary(self): def _data_tag_element(dataarray, encoding, dtype, ordering): - """ Creates data tag with given `encoding`, returns as XML element - """ + """Creates data tag with given `encoding`, returns as XML element""" import zlib + order = array_index_order_codes.npcode[ordering] enclabel = gifti_encoding_codes.label[encoding] if enclabel == 'ASCII': @@ -378,7 +379,7 @@ def _data_tag_element(dataarray, encoding, dtype, ordering): out = zlib.compress(out) da = base64.b64encode(out).decode() elif enclabel == 'External': - raise NotImplementedError("In what format are the external files?") + raise NotImplementedError('In what format are the external files?') else: da = '' @@ -388,7 +389,7 @@ def _data_tag_element(dataarray, encoding, dtype, ordering): class GiftiDataArray(xml.XmlSerializable): - """ Container for Gifti numerical data array and associated metadata + """Container for Gifti numerical data array and associated metadata Quotes are from the gifti spec dated 2011-01-14. @@ -437,17 +438,19 @@ class GiftiDataArray(xml.XmlSerializable): Position in bytes within `ext_fname` at which to start reading data. """ - def __init__(self, - data=None, - intent='NIFTI_INTENT_NONE', - datatype=None, - encoding="GIFTI_ENCODING_B64GZ", - endian=sys.byteorder, - coordsys=None, - ordering="C", - meta=None, - ext_fname='', - ext_offset=0): + def __init__( + self, + data=None, + intent='NIFTI_INTENT_NONE', + datatype=None, + encoding='GIFTI_ENCODING_B64GZ', + endian=sys.byteorder, + coordsys=None, + ordering='C', + meta=None, + ext_fname='', + ext_offset=0, + ): """ Returns a shell object that cannot be saved. 
""" @@ -460,15 +463,19 @@ def __init__(self, self.endian = gifti_endian_codes.code[endian] self.coordsys = coordsys or GiftiCoordSystem() self.ind_ord = array_index_order_codes.code[ordering] - self.meta = (GiftiMetaData() if meta is None else - meta if isinstance(meta, GiftiMetaData) else - GiftiMetaData(meta)) + self.meta = ( + GiftiMetaData() + if meta is None + else meta + if isinstance(meta, GiftiMetaData) + else GiftiMetaData(meta) + ) self.ext_fname = ext_fname self.ext_offset = ext_offset self.dims = [] if self.data is None else list(self.data.shape) def __repr__(self): - return f"" + return f'' @property def num_dim(self): @@ -479,15 +486,19 @@ def _to_xml_element(self): self.endian = gifti_endian_codes.code[sys.byteorder] # All attribute values must be strings - data_array = xml.Element('DataArray', attrib={ - 'Intent': intent_codes.niistring[self.intent], - 'DataType': data_type_codes.niistring[self.datatype], - 'ArrayIndexingOrder': array_index_order_codes.label[self.ind_ord], - 'Dimensionality': str(self.num_dim), - 'Encoding': gifti_encoding_codes.specs[self.encoding], - 'Endian': gifti_endian_codes.specs[self.endian], - 'ExternalFileName': self.ext_fname, - 'ExternalFileOffset': str(self.ext_offset)}) + data_array = xml.Element( + 'DataArray', + attrib={ + 'Intent': intent_codes.niistring[self.intent], + 'DataType': data_type_codes.niistring[self.datatype], + 'ArrayIndexingOrder': array_index_order_codes.label[self.ind_ord], + 'Dimensionality': str(self.num_dim), + 'Encoding': gifti_encoding_codes.specs[self.encoding], + 'Endian': gifti_endian_codes.specs[self.endian], + 'ExternalFileName': self.ext_fname, + 'ExternalFileOffset': str(self.ext_offset), + }, + ) for di, dn in enumerate(self.dims): data_array.attrib['Dim%d' % di] = str(dn) @@ -497,18 +508,20 @@ def _to_xml_element(self): data_array.append(self.coordsys._to_xml_element()) # write data array depending on the encoding data_array.append( - _data_tag_element(self.data, - gifti_encoding_codes.specs[self.encoding], - data_type_codes.dtype[self.datatype], - self.ind_ord)) + _data_tag_element( + self.data, + gifti_encoding_codes.specs[self.encoding], + data_type_codes.dtype[self.datatype], + self.ind_ord, + ) + ) return data_array def print_summary(self): print('Intent: ', intent_codes.niistring[self.intent]) print('DataType: ', data_type_codes.niistring[self.datatype]) - print('ArrayIndexingOrder: ', - array_index_order_codes.label[self.ind_ord]) + print('ArrayIndexingOrder: ', array_index_order_codes.label[self.ind_ord]) print('Dimensionality: ', self.num_dim) print('Dimensions: ', self.dims) print('Encoding: ', gifti_encoding_codes.specs[self.encoding]) @@ -522,12 +535,12 @@ def print_summary(self): @property def metadata(self): - """ Returns metadata as dictionary """ + """Returns metadata as dictionary""" return dict(self.meta) class GiftiImage(xml.XmlSerializable, SerializableImage): - """ GIFTI image object + """GIFTI image object The Gifti spec suggests using the following suffixes to your filename when saving each specific type of data: @@ -555,6 +568,7 @@ class GiftiImage(xml.XmlSerializable, SerializableImage): The Gifti file is stored in endian convention of the current machine. """ + valid_exts = ('.gii',) files_types = (('image', '.gii'),) _compressed_suffixes = ('.gz', '.bz2') @@ -564,10 +578,17 @@ class GiftiImage(xml.XmlSerializable, SerializableImage): # the class has been defined, at the end of the class definition. 
parser = None - def __init__(self, header=None, extra=None, file_map=None, meta=None, - labeltable=None, darrays=None, version="1.0"): - super(GiftiImage, self).__init__(header=header, extra=extra, - file_map=file_map) + def __init__( + self, + header=None, + extra=None, + file_map=None, + meta=None, + labeltable=None, + darrays=None, + version='1.0', + ): + super(GiftiImage, self).__init__(header=header, extra=extra, file_map=file_map) if darrays is None: darrays = [] if meta is None: @@ -591,14 +612,14 @@ def labeltable(self): @labeltable.setter def labeltable(self, labeltable): - """ Set the labeltable for this GiftiImage + """Set the labeltable for this GiftiImage Parameters ---------- labeltable : :class:`GiftiLabelTable` instance """ if not isinstance(labeltable, GiftiLabelTable): - raise TypeError("Not a valid GiftiLabelTable instance") + raise TypeError('Not a valid GiftiLabelTable instance') self._labeltable = labeltable @property @@ -607,41 +628,40 @@ def meta(self): @meta.setter def meta(self, meta): - """ Set the metadata for this GiftiImage + """Set the metadata for this GiftiImage Parameters ---------- meta : :class:`GiftiMetaData` instance """ if not isinstance(meta, GiftiMetaData): - raise TypeError("Not a valid GiftiMetaData instance") + raise TypeError('Not a valid GiftiMetaData instance') self._meta = meta def add_gifti_data_array(self, dataarr): - """ Adds a data array to the GiftiImage + """Adds a data array to the GiftiImage Parameters ---------- dataarr : :class:`GiftiDataArray` instance """ if not isinstance(dataarr, GiftiDataArray): - raise TypeError("Not a valid GiftiDataArray instance") + raise TypeError('Not a valid GiftiDataArray instance') self.darrays.append(dataarr) def remove_gifti_data_array(self, ith): - """ Removes the ith data array element from the GiftiImage """ + """Removes the ith data array element from the GiftiImage""" self.darrays.pop(ith) def remove_gifti_data_array_by_intent(self, intent): - """ Removes all the data arrays with the given intent type """ + """Removes all the data arrays with the given intent type""" intent2remove = intent_codes.code[intent] for dele in self.darrays: if dele.intent == intent2remove: self.darrays.remove(dele) def get_arrays_from_intent(self, intent): - """ Return list of GiftiDataArray elements matching given intent - """ + """Return list of GiftiDataArray elements matching given intent""" it = intent_codes.code[intent] return [x for x in self.darrays if x.intent == it] @@ -800,9 +820,9 @@ def print_summary(self): print('----end----') def _to_xml_element(self): - GIFTI = xml.Element('GIFTI', attrib={ - 'Version': self.version, - 'NumberOfDataArrays': str(self.numDA)}) + GIFTI = xml.Element( + 'GIFTI', attrib={'Version': self.version, 'NumberOfDataArrays': str(self.numDA)} + ) if self.meta is not None: GIFTI.append(self.meta._to_xml_element()) if self.labeltable is not None: @@ -812,16 +832,18 @@ def _to_xml_element(self): return GIFTI def to_xml(self, enc='utf-8'): - """ Return XML corresponding to image content """ + """Return XML corresponding to image content""" return b""" -""" + xml.XmlSerializable.to_xml(self, enc) +""" + xml.XmlSerializable.to_xml( + self, enc + ) # Avoid the indirection of going through to_file_map to_bytes = to_xml def to_file_map(self, file_map=None): - """ Save the current image to the specified file_map + """Save the current image to the specified file_map Parameters ---------- @@ -880,4 +902,5 @@ def from_filename(klass, filename, buffer_size=35000000, mmap=True): # Now GiftiImage is 
defined, we can import the parser module and set the parser from .parse_gifti_fast import GiftiImageParser + GiftiImage.parser = GiftiImageParser diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index ed55fd97ea..5de4c2e22c 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -17,17 +17,21 @@ import numpy as np -from .gifti import (GiftiMetaData, GiftiImage, GiftiLabel, - GiftiLabelTable, GiftiDataArray, - GiftiCoordSystem) -from .util import (array_index_order_codes, gifti_encoding_codes, - gifti_endian_codes) +from .gifti import ( + GiftiMetaData, + GiftiImage, + GiftiLabel, + GiftiLabelTable, + GiftiDataArray, + GiftiCoordSystem, +) +from .util import array_index_order_codes, gifti_encoding_codes, gifti_endian_codes from ..nifti1 import data_type_codes, xform_codes, intent_codes from ..xmlutils import XmlParser class GiftiParseError(ExpatError): - """ Gifti-specific parsing error """ + """Gifti-specific parsing error""" def read_data_block(darray, fname, data, mmap): @@ -60,8 +64,7 @@ def read_data_block(darray, fname, data, mmap): ``numpy.ndarray`` or ``numpy.memmap`` containing the parsed data """ if mmap not in (True, False, 'c', 'r', 'r+'): - raise ValueError("mmap value should be one of True, False, 'c', " - "'r', 'r+'") + raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") if mmap is True: mmap = 'c' enclabel = gifti_encoding_codes.label[darray.encoding] @@ -81,8 +84,9 @@ def read_data_block(darray, fname, data, mmap): # attributes if enclabel == 'External': if fname is None: - raise GiftiParseError('ExternalFileBinary is not supported ' - 'when loading from in-memory XML') + raise GiftiParseError( + 'ExternalFileBinary is not supported ' 'when loading from in-memory XML' + ) ext_fname = op.join(op.dirname(fname), darray.ext_fname) if not op.exists(ext_fname): raise GiftiParseError('Cannot locate external file ' + ext_fname) @@ -90,11 +94,13 @@ def read_data_block(darray, fname, data, mmap): newarr = None if mmap: try: - newarr = np.memmap(ext_fname, - dtype=dtype, - mode=mmap, - offset=darray.ext_offset, - shape=tuple(darray.dims)) + newarr = np.memmap( + ext_fname, + dtype=dtype, + mode=mmap, + offset=darray.ext_offset, + shape=tuple(darray.dims), + ) # If the memmap fails, we ignore the error and load the data into # memory below except (AttributeError, TypeError, ValueError): @@ -128,13 +134,11 @@ def read_data_block(darray, fname, data, mmap): sh = tuple(darray.dims) if len(newarr.shape) != len(sh): - newarr = newarr.reshape( - sh, order=array_index_order_codes.npcode[darray.ind_ord]) + newarr = newarr.reshape(sh, order=array_index_order_codes.npcode[darray.ind_ord]) # check if we need to byteswap required_byteorder = gifti_endian_codes.byteorder[darray.endian] - if (required_byteorder in ('big', 'little') and - required_byteorder != sys.byteorder): + if required_byteorder in ('big', 'little') and required_byteorder != sys.byteorder: newarr = newarr.byteswap() return newarr @@ -145,12 +149,10 @@ def _str2int(in_str): class GiftiImageParser(XmlParser): - - def __init__(self, encoding=None, buffer_size=35000000, verbose=0, - mmap=True): - super(GiftiImageParser, self).__init__(encoding=encoding, - buffer_size=buffer_size, - verbose=verbose) + def __init__(self, encoding=None, buffer_size=35000000, verbose=0, mmap=True): + super(GiftiImageParser, self).__init__( + encoding=encoding, buffer_size=buffer_size, verbose=verbose + ) # output self.img = None @@ -220,45 +222,44 @@ def 
StartElementHandler(self, name, attrs): elif name == 'Label': self.label = GiftiLabel() - if "Index" in attrs: - self.label.key = int(attrs["Index"]) - if "Key" in attrs: - self.label.key = int(attrs["Key"]) - if "Red" in attrs: - self.label.red = float(attrs["Red"]) - if "Green" in attrs: - self.label.green = float(attrs["Green"]) - if "Blue" in attrs: - self.label.blue = float(attrs["Blue"]) - if "Alpha" in attrs: - self.label.alpha = float(attrs["Alpha"]) + if 'Index' in attrs: + self.label.key = int(attrs['Index']) + if 'Key' in attrs: + self.label.key = int(attrs['Key']) + if 'Red' in attrs: + self.label.red = float(attrs['Red']) + if 'Green' in attrs: + self.label.green = float(attrs['Green']) + if 'Blue' in attrs: + self.label.blue = float(attrs['Blue']) + if 'Alpha' in attrs: + self.label.alpha = float(attrs['Alpha']) self.write_to = 'Label' elif name == 'DataArray': self.da = GiftiDataArray() - if "Intent" in attrs: - self.da.intent = intent_codes.code[attrs["Intent"]] - if "DataType" in attrs: - self.da.datatype = data_type_codes.code[attrs["DataType"]] - if "ArrayIndexingOrder" in attrs: - self.da.ind_ord = array_index_order_codes.code[ - attrs["ArrayIndexingOrder"]] - num_dim = int(attrs.get("Dimensionality", 0)) + if 'Intent' in attrs: + self.da.intent = intent_codes.code[attrs['Intent']] + if 'DataType' in attrs: + self.da.datatype = data_type_codes.code[attrs['DataType']] + if 'ArrayIndexingOrder' in attrs: + self.da.ind_ord = array_index_order_codes.code[attrs['ArrayIndexingOrder']] + num_dim = int(attrs.get('Dimensionality', 0)) for i in range(num_dim): - di = f"Dim{i}" + di = f'Dim{i}' if di in attrs: self.da.dims.append(int(attrs[di])) # dimensionality has to correspond to the number of DimX given # TODO (bcipolli): don't assert; raise parse warning, and recover. assert len(self.da.dims) == num_dim - if "Encoding" in attrs: - self.da.encoding = gifti_encoding_codes.code[attrs["Encoding"]] - if "Endian" in attrs: - self.da.endian = gifti_endian_codes.code[attrs["Endian"]] - if "ExternalFileName" in attrs: - self.da.ext_fname = attrs["ExternalFileName"] - if "ExternalFileOffset" in attrs: - self.da.ext_offset = _str2int(attrs["ExternalFileOffset"]) + if 'Encoding' in attrs: + self.da.encoding = gifti_encoding_codes.code[attrs['Encoding']] + if 'Endian' in attrs: + self.da.endian = gifti_endian_codes.code[attrs['Endian']] + if 'ExternalFileName' in attrs: + self.da.ext_fname = attrs['ExternalFileName'] + if 'ExternalFileOffset' in attrs: + self.da.ext_offset = _str2int(attrs['ExternalFileOffset']) self.img.darrays.append(self.da) self.fsm_state.append('DataArray') @@ -292,9 +293,10 @@ def EndElementHandler(self, name): if name == 'GIFTI': if hasattr(self, 'expected_numDA') and self.expected_numDA != self.img.numDA: - warnings.warn("Actual # of data arrays does not match " - "# expected: %d != %d." % (self.expected_numDA, - self.img.numDA)) + warnings.warn( + 'Actual # of data arrays does not match ' + '# expected: %d != %d.' 
% (self.expected_numDA, self.img.numDA) + ) # remove last element of the list self.fsm_state.pop() # assert len(self.fsm_state) == 0 @@ -333,8 +335,7 @@ def EndElementHandler(self, name): self.fsm_state.pop() self.coordsys = None - elif name in ['DataSpace', 'TransformedSpace', 'MatrixData', - 'Name', 'Value', 'Data']: + elif name in ['DataSpace', 'TransformedSpace', 'MatrixData', 'Name', 'Value', 'Data']: self.write_to = None elif name == 'Label': @@ -343,7 +344,7 @@ def EndElementHandler(self, name): self.write_to = None def CharacterDataHandler(self, data): - """ Collect character data chunks pending collation + """Collect character data chunks pending collation The parser breaks the data up into chunks of size depending on the buffer_size of the parser. A large bit of character data, with @@ -356,7 +357,7 @@ def CharacterDataHandler(self, data): self._char_blocks.append(data) def flush_chardata(self): - """ Collate and process collected character data""" + """Collate and process collected character data""" # Nothing to do for empty elements, except for Data elements which # are within a DataArray with an external file if self.write_to != 'Data' and self._char_blocks is None: @@ -395,8 +396,7 @@ def flush_chardata(self): c.close() elif self.write_to == 'Data': - self.da.data = read_data_block(self.da, self.fname, data, - self.mmap) + self.da.data = read_data_block(self.da, self.fname, data, self.mmap) # update the endianness according to the # current machine setting self.endian = gifti_endian_codes.code[sys.byteorder] diff --git a/nibabel/gifti/tests/test_1.py b/nibabel/gifti/tests/test_1.py index a464ee49ef..0e19e59c43 100644 --- a/nibabel/gifti/tests/test_1.py +++ b/nibabel/gifti/tests/test_1.py @@ -1,4 +1,4 @@ -""" Testing loading of gifti file +"""Testing loading of gifti file The file is ``test_1`` because we are testing a bug where, if we try to load a file before instantiating some Gifti objects, loading fails with an diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 1fa4eb8917..73ae9ed95d 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -1,4 +1,4 @@ -""" Testing gifti objects +"""Testing gifti objects """ import warnings import sys @@ -9,17 +9,29 @@ from nibabel.tmpdirs import InTemporaryDirectory from ... import load -from .. import (GiftiImage, GiftiDataArray, GiftiLabel, - GiftiLabelTable, GiftiMetaData, GiftiNVPairs, - GiftiCoordSystem) +from .. import ( + GiftiImage, + GiftiDataArray, + GiftiLabel, + GiftiLabelTable, + GiftiMetaData, + GiftiNVPairs, + GiftiCoordSystem, +) from ...nifti1 import data_type_codes from ...fileholders import FileHolder from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest from ...testing import test_data -from .test_parse_gifti_fast import (DATA_FILE1, DATA_FILE2, DATA_FILE3, - DATA_FILE4, DATA_FILE5, DATA_FILE6) +from .test_parse_gifti_fast import ( + DATA_FILE1, + DATA_FILE2, + DATA_FILE3, + DATA_FILE4, + DATA_FILE5, + DATA_FILE6, +) import itertools @@ -51,6 +63,7 @@ def test_agg_data(): assert surf_gii_img.agg_data(('pointset', 'triangle')) == (point_data, triangle_data) assert surf_gii_img.agg_data(('triangle', 'pointset')) == (triangle_data, point_data) + def test_gifti_image(): # Check that we're not modifying the default empty list in the default # arguments. 
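
test_agg_data above pins down the intent-based accessor on GiftiImage. A short usage sketch; 'lh.pial.gii' is a hypothetical surface file name, not one of the test fixtures:

    import nibabel as nib

    surf = nib.load('lh.pial.gii')  # hypothetical GIFTI surface file
    # agg_data selects arrays by intent; a tuple preserves the requested order
    coords, faces = surf.agg_data(('pointset', 'triangle'))
    print(coords.shape, faces.shape)
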
@@ -104,11 +117,13 @@ def test_gifti_image_bad_inputs(): # Try to set to non-table def assign_labeltable(val): img.labeltable = val + pytest.raises(TypeError, assign_labeltable, 'not-a-table') # Try to set to non-table def assign_metadata(val): img.meta = val + pytest.raises(TypeError, assign_metadata, 'not-a-meta') @@ -172,7 +187,7 @@ def test_dataarray_init(): assert gda(ordering='ColumnMajorOrder').ind_ord == 2 pytest.raises(KeyError, gda, ordering='not an ordering') # metadata - meta_dict=dict(one=1, two=2) + meta_dict = dict(one=1, two=2) assert gda(meta=GiftiMetaData(meta_dict)).meta == meta_dict assert gda(meta=meta_dict).meta == meta_dict assert gda(meta=None).meta == {} @@ -307,6 +322,7 @@ def test_gifti_label_rgba(): def assign_rgba(gl, val): gl.rgba = val + gl3 = GiftiLabel(**kwargs) pytest.raises(ValueError, assign_rgba, gl3, rgba[:2]) pytest.raises(ValueError, assign_rgba, gl3, rgba.tolist() + rgba.tolist()) @@ -318,14 +334,14 @@ def assign_rgba(gl, val): def test_print_summary(): - for fil in [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, - DATA_FILE5, DATA_FILE6]: + for fil in [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6]: gimg = load(fil) gimg.print_summary() def test_gifti_coord(): from ..gifti import GiftiCoordSystem + gcs = GiftiCoordSystem() assert gcs.xform is not None @@ -339,7 +355,7 @@ def test_gifti_round_trip(): # From section 14.4 in GIFTI Surface Data Format Version 1.0 # (with some adaptations) - test_data = b''' + test_data = b""" -''' +""" exp_verts = np.zeros((4, 3)) exp_verts[0, 0] = 10.5 exp_verts[1, 1] = 20.5 exp_verts[2, 2] = 30.5 - exp_faces = np.asarray([[0, 1, 2], [1, 2, 3], [0, 1, 3], [0, 2, 3]], - dtype=np.int32) + exp_faces = np.asarray([[0, 1, 2], [1, 2, 3], [0, 1, 3], [0, 2, 3]], dtype=np.int32) def _check_gifti(gio): vertices = gio.get_arrays_from_intent('NIFTI_INTENT_POINTSET')[0].data @@ -449,13 +464,13 @@ def test_data_array_round_trip(): def test_darray_dtype_coercion_failures(): dtypes = (np.uint8, np.int32, np.int64, np.float32, np.float64) encodings = ('ASCII', 'B64BIN', 'B64GZ') - for data_dtype, darray_dtype, encoding in itertools.product(dtypes, - dtypes, - encodings): - da = GiftiDataArray(np.arange(10).astype(data_dtype), - encoding=encoding, - intent='NIFTI_INTENT_NODE_INDEX', - datatype=darray_dtype) + for data_dtype, darray_dtype, encoding in itertools.product(dtypes, dtypes, encodings): + da = GiftiDataArray( + np.arange(10).astype(data_dtype), + encoding=encoding, + intent='NIFTI_INTENT_NODE_INDEX', + datatype=darray_dtype, + ) gii = GiftiImage(darrays=[da]) gii_copy = GiftiImage.from_bytes(gii.to_bytes()) da_copy = gii_copy.darrays[0] diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index d376611581..d1f61d3c22 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -46,80 +46,113 @@ numDA = [2, 1, 1, 1, 2, 1, 2] DATA_FILE1_darr1 = np.array( - [[-16.07201, -66.187515, 21.266994], - [-16.705893, -66.054337, 21.232786], - [-17.614349, -65.401642, 21.071466]]) + [ + [-16.07201, -66.187515, 21.266994], + [-16.705893, -66.054337, 21.232786], + [-17.614349, -65.401642, 21.071466], + ] +) DATA_FILE1_darr2 = np.array([0, 1, 2]) -DATA_FILE2_darr1 = np.array([[0.43635699], - [0.270017], - [0.133239], - [0.35054299], - [0.26538199], - [0.32122701], - [0.23495001], - [0.26671499], - [0.306851], - [0.36302799]], dtype=np.float32) +DATA_FILE2_darr1 = np.array( + [ + [0.43635699], + [0.270017], + 
[0.133239], + [0.35054299], + [0.26538199], + [0.32122701], + [0.23495001], + [0.26671499], + [0.306851], + [0.36302799], + ], + dtype=np.float32, +) DATA_FILE3_darr1 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]) -DATA_FILE4_darr1 = np.array([[-0.57811606], - [-0.53871965], - [-0.44602534], - [-0.56532663], - [-0.51392376], - [-0.43225467], - [-0.54646534], - [-0.48011276], - [-0.45624232], - [-0.31101292]], dtype=np.float32) - -DATA_FILE5_darr1 = np.array([[155.17539978, 135.58103943, 98.30715179], - [140.33973694, 190.0491333, 73.24776459], - [157.3598938, 196.97969055, 83.65809631], - [171.46174622, 137.43661499, 78.4709549], - [148.54592896, 97.06752777, 65.96373749], - [123.45701599, 111.46841431, 66.3571167], - [135.30892944, 202.28720093, 36.38148499], - [178.28155518, 162.59469604, 37.75128937], - [178.11087036, 115.28820038, 57.17986679], - [142.81582642, 82.82115173, 31.02205276]], dtype=np.float32) - -DATA_FILE5_darr2 = np.array([[6402, 17923, 25602], - [14085, 25602, 17923], - [25602, 14085, 4483], - [17923, 1602, 14085], - [4483, 25603, 25602], - [25604, 25602, 25603], - [25602, 25604, 6402], - [25603, 3525, 25604], - [1123, 17922, 12168], - [25604, 12168, 17922]], dtype=np.int32) +DATA_FILE4_darr1 = np.array( + [ + [-0.57811606], + [-0.53871965], + [-0.44602534], + [-0.56532663], + [-0.51392376], + [-0.43225467], + [-0.54646534], + [-0.48011276], + [-0.45624232], + [-0.31101292], + ], + dtype=np.float32, +) + +DATA_FILE5_darr1 = np.array( + [ + [155.17539978, 135.58103943, 98.30715179], + [140.33973694, 190.0491333, 73.24776459], + [157.3598938, 196.97969055, 83.65809631], + [171.46174622, 137.43661499, 78.4709549], + [148.54592896, 97.06752777, 65.96373749], + [123.45701599, 111.46841431, 66.3571167], + [135.30892944, 202.28720093, 36.38148499], + [178.28155518, 162.59469604, 37.75128937], + [178.11087036, 115.28820038, 57.17986679], + [142.81582642, 82.82115173, 31.02205276], + ], + dtype=np.float32, +) + +DATA_FILE5_darr2 = np.array( + [ + [6402, 17923, 25602], + [14085, 25602, 17923], + [25602, 14085, 4483], + [17923, 1602, 14085], + [4483, 25603, 25602], + [25604, 25602, 25603], + [25602, 25604, 6402], + [25603, 3525, 25604], + [1123, 17922, 12168], + [25604, 12168, 17922], + ], + dtype=np.int32, +) DATA_FILE6_darr1 = np.array([9182740, 9182740, 9182740], dtype=np.float32) -DATA_FILE7_darr1 = np.array([[-1., -1., -1.], - [-1., -1., 1.], - [-1., 1., -1.], - [-1., 1., 1.], - [ 1., -1., -1.], - [ 1., -1., 1.], - [ 1., 1., -1.], - [ 1., 1., 1.]], dtype=np.float32) - -DATA_FILE7_darr2 = np.array([[0, 6, 4], - [0, 2, 6], - [1, 5, 3], - [3, 5, 7], - [0, 4, 1], - [1, 4, 5], - [2, 7, 6], - [2, 3, 7], - [0, 1, 2], - [1, 3, 2], - [4, 7, 5], - [4, 6, 7]], dtype=np.int32) +DATA_FILE7_darr1 = np.array( + [ + [-1.0, -1.0, -1.0], + [-1.0, -1.0, 1.0], + [-1.0, 1.0, -1.0], + [-1.0, 1.0, 1.0], + [1.0, -1.0, -1.0], + [1.0, -1.0, 1.0], + [1.0, 1.0, -1.0], + [1.0, 1.0, 1.0], + ], + dtype=np.float32, +) + +DATA_FILE7_darr2 = np.array( + [ + [0, 6, 4], + [0, 2, 6], + [1, 5, 3], + [3, 5, 7], + [0, 4, 1], + [1, 4, 5], + [2, 7, 6], + [2, 3, 7], + [0, 1, 2], + [1, 3, 2], + [4, 7, 5], + [4, 6, 7], + ], + dtype=np.int32, +) def assert_default_types(loaded): @@ -132,8 +165,9 @@ def assert_default_types(loaded): continue with suppress_warnings(): loadedtype = type(getattr(loaded, attr)) - assert loadedtype == defaulttype, ( - f"Type mismatch for attribute: {attr} ({loadedtype} != {defaulttype})") + assert ( + loadedtype == defaulttype + ), f'Type mismatch for attribute: 
{attr} ({loadedtype} != {defaulttype})' def test_default_types(): @@ -197,10 +231,8 @@ def test_load_dataarray1(): assert 'AnatomicalStructureSecondary' in me me['AnatomicalStructurePrimary'] == 'CortexLeft' assert_array_almost_equal(img.darrays[0].coordsys.xform, np.eye(4, 4)) - assert xform_codes.niistring[ - img.darrays[0].coordsys.dataspace] == 'NIFTI_XFORM_TALAIRACH' - assert xform_codes.niistring[img.darrays[ - 0].coordsys.xformspace] == 'NIFTI_XFORM_TALAIRACH' + assert xform_codes.niistring[img.darrays[0].coordsys.dataspace] == 'NIFTI_XFORM_TALAIRACH' + assert xform_codes.niistring[img.darrays[0].coordsys.xformspace] == 'NIFTI_XFORM_TALAIRACH' def test_load_dataarray2(): @@ -276,8 +308,8 @@ def test_readwritedata(): save(img, 'test.gii') img2 = load('test.gii') assert img.numDA == img2.numDA - assert_array_almost_equal(img.darrays[0].data, - img2.darrays[0].data) + assert_array_almost_equal(img.darrays[0].data, img2.darrays[0].data) + def test_modify_darray(): for fname in (DATA_FILE1, DATA_FILE2, DATA_FILE5): @@ -302,13 +334,13 @@ def test_write_newmetadata(): def test_load_getbyintent(): img = load(DATA_FILE1) - da = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET") + da = img.get_arrays_from_intent('NIFTI_INTENT_POINTSET') assert len(da) == 1 - da = img.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE") + da = img.get_arrays_from_intent('NIFTI_INTENT_TRIANGLE') assert len(da) == 1 - da = img.get_arrays_from_intent("NIFTI_INTENT_CORREL") + da = img.get_arrays_from_intent('NIFTI_INTENT_CORREL') assert len(da) == 0 assert da == [] diff --git a/nibabel/gifti/util.py b/nibabel/gifti/util.py index 4071c97312..7659ee33cc 100644 --- a/nibabel/gifti/util.py +++ b/nibabel/gifti/util.py @@ -10,27 +10,28 @@ from ..volumeutils import Recoder # Translate dtype.kind char codes to XML text output strings -KIND2FMT = { - 'i': '%i', - 'u': '%i', - 'f': '%10.6f', - 'c': '%10.6f', - 'V': ''} +KIND2FMT = {'i': '%i', 'u': '%i', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} -array_index_order_codes = Recoder(((1, "RowMajorOrder", 'C'), - (2, "ColumnMajorOrder", 'F')), - fields=('code', 'label', 'npcode')) +array_index_order_codes = Recoder( + ((1, 'RowMajorOrder', 'C'), (2, 'ColumnMajorOrder', 'F')), fields=('code', 'label', 'npcode') +) gifti_encoding_codes = Recoder( - ((0, "undef", "GIFTI_ENCODING_UNDEF", "undef"), - (1, "ASCII", "GIFTI_ENCODING_ASCII", "ASCII"), - (2, "B64BIN", "GIFTI_ENCODING_B64BIN", "Base64Binary"), - (3, "B64GZ", "GIFTI_ENCODING_B64GZ", "GZipBase64Binary"), - (4, "External", "GIFTI_ENCODING_EXTBIN", "ExternalFileBinary")), - fields=('code', 'label', 'giistring', 'specs')) + ( + (0, 'undef', 'GIFTI_ENCODING_UNDEF', 'undef'), + (1, 'ASCII', 'GIFTI_ENCODING_ASCII', 'ASCII'), + (2, 'B64BIN', 'GIFTI_ENCODING_B64BIN', 'Base64Binary'), + (3, 'B64GZ', 'GIFTI_ENCODING_B64GZ', 'GZipBase64Binary'), + (4, 'External', 'GIFTI_ENCODING_EXTBIN', 'ExternalFileBinary'), + ), + fields=('code', 'label', 'giistring', 'specs'), +) gifti_endian_codes = Recoder( - ((0, "GIFTI_ENDIAN_UNDEF", "Undef", "undef"), - (1, "GIFTI_ENDIAN_BIG", "BigEndian", "big"), - (2, "GIFTI_ENDIAN_LITTLE", "LittleEndian", "little")), - fields=('code', 'giistring', 'specs', 'byteorder')) + ( + (0, 'GIFTI_ENDIAN_UNDEF', 'Undef', 'undef'), + (1, 'GIFTI_ENDIAN_BIG', 'BigEndian', 'big'), + (2, 'GIFTI_ENDIAN_LITTLE', 'LittleEndian', 'little'), + ), + fields=('code', 'giistring', 'specs', 'byteorder'), +) diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index 6b26ac0c05..614692daac 100644 --- a/nibabel/imageclasses.py +++ 
b/nibabel/imageclasses.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Define supported image classes and names """ +"""Define supported image classes and names""" from .analyze import AnalyzeImage from .brikhead import AFNIImage @@ -22,22 +22,42 @@ from .spm2analyze import Spm2AnalyzeImage # Ordered by the load/save priority. -all_image_classes = [Nifti1Pair, Nifti1Image, Nifti2Pair, - Cifti2Image, Nifti2Image, # Cifti2 before Nifti2 - Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, - Minc1Image, Minc2Image, MGHImage, - PARRECImage, GiftiImage, AFNIImage] +all_image_classes = [ + Nifti1Pair, + Nifti1Image, + Nifti2Pair, + Cifti2Image, + Nifti2Image, # Cifti2 before Nifti2 + Spm2AnalyzeImage, + Spm99AnalyzeImage, + AnalyzeImage, + Minc1Image, + Minc2Image, + MGHImage, + PARRECImage, + GiftiImage, + AFNIImage, +] # Image classes known to require spatial axes to be first in index ordering. # When adding an image class, consider whether the new class should be listed # here. -KNOWN_SPATIAL_FIRST = (Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, - Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, - MGHImage, PARRECImage, AFNIImage) +KNOWN_SPATIAL_FIRST = ( + Nifti1Pair, + Nifti1Image, + Nifti2Pair, + Nifti2Image, + Spm2AnalyzeImage, + Spm99AnalyzeImage, + AnalyzeImage, + MGHImage, + PARRECImage, + AFNIImage, +) def spatial_axes_first(img): - """ True if spatial image axes for `img` always precede other axes + """True if spatial image axes for `img` always precede other axes Parameters ---------- diff --git a/nibabel/imageglobals.py b/nibabel/imageglobals.py index 4cdeb7b1a3..81a1742809 100644 --- a/nibabel/imageglobals.py +++ b/nibabel/imageglobals.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Defaults for images and headers +"""Defaults for images and headers error_level is the problem level (see BatteryRunners) at which an error will be raised, by the batteryrunners ``log_raise`` method. Thus a level of 0 will @@ -32,8 +32,7 @@ class ErrorLevel: - """ Context manager to set log error level - """ + """Context manager to set log error level""" def __init__(self, level): self.level = level diff --git a/nibabel/imagestats.py b/nibabel/imagestats.py index 4520d7f612..f507365e93 100644 --- a/nibabel/imagestats.py +++ b/nibabel/imagestats.py @@ -33,7 +33,7 @@ def count_nonzero_voxels(img): def mask_volume(img): - """ Compute volume of mask image. + """Compute volume of mask image. Equivalent to "fslstats /path/file.nii -V" @@ -58,7 +58,7 @@ def mask_volume(img): 1000.0 """ if not spatial_axes_first(img): - raise ValueError("Cannot calculate voxel volume for image with unknown spatial axes") + raise ValueError('Cannot calculate voxel volume for image with unknown spatial axes') voxel_volume_mm3 = np.prod(img.header.get_zooms()[:3]) mask_volume_vx = count_nonzero_voxels(img) mask_volume_mm3 = mask_volume_vx * voxel_volume_mm3 diff --git a/nibabel/info.py b/nibabel/info.py index 38690246c3..bdd291728a 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -1,4 +1,4 @@ -""" Define distribution parameters for nibabel, including package version +"""Define distribution parameters for nibabel, including package version The long description parameter is used to fill settings in setup.py, the nibabel top-level docstring, and in building the docs. 
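The `mask_volume` hunk in `nibabel/imagestats.py` above only changes quoting, but the function's contract is worth keeping in mind while reading it: the volume is the nonzero-voxel count times the voxel volume taken from the first three zooms. A minimal sketch, assuming an invented 3x3x3 mask with eight nonzero voxels and an identity affine (so 1 mm isotropic zooms):

    import numpy as np
    import nibabel as nib
    from nibabel.imagestats import mask_volume

    mask = np.zeros((3, 3, 3), dtype=np.uint8)
    mask[:2, :2, :2] = 1  # 8 nonzero voxels
    img = nib.Nifti1Image(mask, affine=np.eye(4))
    print(mask_volume(img))  # 8 voxels * (1 mm)**3 = 8.0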
@@ -16,7 +16,7 @@ _version_extra = '.dev0' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" -VERSION = f"{_version_major}.{_version_minor}.{_version_micro}{_version_extra}" +VERSION = f'{_version_major}.{_version_minor}.{_version_micro}{_version_extra}' # Note: this long_description is the canonical place to edit this text. diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 763bf20788..187644a8e1 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -7,7 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # module imports -""" Utilities to load and save image objects """ +"""Utilities to load and save image objects""" import os import numpy as np @@ -43,29 +43,29 @@ def _signature_matches_extension(filename): """ signatures = { - ".gz": {"signature": b"\x1f\x8b", "format_name": "gzip"}, - ".bz2": {"signature": b"BZh", "format_name": "bzip2"}, - ".zst": {"signature": b"\x28\xb5\x2f\xfd", "format_name": "ztsd"}, + '.gz': {'signature': b'\x1f\x8b', 'format_name': 'gzip'}, + '.bz2': {'signature': b'BZh', 'format_name': 'bzip2'}, + '.zst': {'signature': b'\x28\xb5\x2f\xfd', 'format_name': 'ztsd'}, } filename = _stringify_path(filename) *_, ext = splitext_addext(filename) ext = ext.lower() if ext not in signatures: - return True, "" - expected_signature = signatures[ext]["signature"] + return True, '' + expected_signature = signatures[ext]['signature'] try: - with open(filename, "rb") as fh: + with open(filename, 'rb') as fh: sniff = fh.read(len(expected_signature)) except OSError: - return False, f"Could not read file: {filename}" + return False, f'Could not read file: {filename}' if sniff.startswith(expected_signature): - return True, "" - format_name = signatures[ext]["format_name"] - return False, f"File {filename} is not a {format_name} file" + return True, '' + format_name = signatures[ext]['format_name'] + return False, f'File {filename} is not a {format_name} file' def load(filename, **kwargs): - r""" Load file given filename, guessing at file type + r"""Load file given filename, guessing at file type Parameters ---------- @@ -105,7 +105,7 @@ def load(filename, **kwargs): @deprecate_with_version('guessed_image_type deprecated.', '3.2', '5.0') def guessed_image_type(filename): - """ Guess image type from file `filename` + """Guess image type from file `filename` Parameters ---------- @@ -127,7 +127,7 @@ def guessed_image_type(filename): def save(img, filename, **kwargs): - r""" Save an image to file adapting format to `filename` + r"""Save an image to file adapting format to `filename` Parameters ---------- @@ -173,8 +173,7 @@ def save(img, filename, **kwargs): elif type(img) == Nifti2Pair and lext == '.nii': klass = Nifti2Image else: # arbitrary conversion - valid_klasses = [klass for klass in all_image_classes - if ext in klass.valid_exts] + valid_klasses = [klass for klass in all_image_classes if ext in klass.valid_exts] if not valid_klasses: # if list is empty raise ImageFileError(f'Cannot work out file type of "{filename}"') @@ -197,12 +196,11 @@ def save(img, filename, **kwargs): converted.to_filename(filename, **kwargs) -@deprecate_with_version('read_img_data deprecated. ' - 'Please use ``img.dataobj.get_unscaled()`` instead.', - '3.2', - '5.0') +@deprecate_with_version( + 'read_img_data deprecated. 
' 'Please use ``img.dataobj.get_unscaled()`` instead.', '3.2', '5.0' +) def read_img_data(img, prefer='scaled'): - """ Read data from image associated with files + """Read data from image associated with files If you want unscaled data, please use ``img.dataobj.get_unscaled()`` instead. If you want scaled data, use ``img.get_fdata()`` (which will cache @@ -257,12 +255,11 @@ def read_img_data(img, prefer='scaled'): if not hasattr(hdr, 'raw_data_from_fileobj'): # We can only do scaled if prefer == 'unscaled': - raise ValueError("Can only do unscaled for Analyze types") + raise ValueError('Can only do unscaled for Analyze types') return np.array(img.dataobj) # Analyze types img_fh = img.file_map['image'] - img_file_like = (img_fh.filename if img_fh.fileobj is None - else img_fh.fileobj) + img_file_like = img_fh.filename if img_fh.fileobj is None else img_fh.fileobj if img_file_like is None: raise ImageFileError('No image file specified for this image') # Check the consumable values in the header diff --git a/nibabel/minc1.py b/nibabel/minc1.py index c0ae95bd7b..56b8747fb4 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read MINC1 format images """ +"""Read MINC1 format images""" from numbers import Integral @@ -29,18 +29,15 @@ # See # https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#MINC_specific_convenience_functions -_default_dir_cos = { - 'xspace': [1, 0, 0], - 'yspace': [0, 1, 0], - 'zspace': [0, 0, 1]} +_default_dir_cos = {'xspace': [1, 0, 0], 'yspace': [0, 1, 0], 'zspace': [0, 0, 1]} class MincError(Exception): - """ Error when reading MINC files """ + """Error when reading MINC files""" class Minc1File: - """ Class to wrap MINC1 format opened netcdf object + """Class to wrap MINC1 format opened netcdf object Although it has some of the same methods as a ``Header``, we use this only when reading a MINC file, to pull out useful header @@ -54,15 +51,13 @@ def __init__(self, mincfile): # The code below will error with vector_dimensions. See: # https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#An_Introduction_to_NetCDF # https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#Image_dimensions - self._dims = [self._mincfile.variables[s] - for s in self._dim_names] + self._dims = [self._mincfile.variables[s] for s in self._dim_names] # We don't currently support irregular spacing # https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#MINC_specific_convenience_functions for dim in self._dims: if dim.spacing != b'regular__': raise ValueError('Irregular spacing not supported') - self._spatial_dims = [name for name in self._dim_names - if name.endswith('space')] + self._spatial_dims = [name for name in self._dim_names if name.endswith('space')] # the MINC standard appears to allow the following variables to # be undefined. 
# https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#Image_conversion_variables @@ -91,10 +86,9 @@ def get_data_shape(self): return self._image.data.shape def get_zooms(self): - """ Get real-world sizes of voxels """ + """Get real-world sizes of voxels""" # zooms must be positive; but steps in MINC can be negative - return tuple([abs(float(dim.step)) if hasattr(dim, 'step') else 1.0 - for dim in self._dims]) + return tuple([abs(float(dim.step)) if hasattr(dim, 'step') else 1.0 for dim in self._dims]) def get_affine(self): nspatial = len(self._spatial_dims) @@ -104,9 +98,11 @@ def get_affine(self): dim_names = list(self._dim_names) # for indexing in loop for i, name in enumerate(self._spatial_dims): dim = self._dims[dim_names.index(name)] - rot_mat[:, i] = (dim.direction_cosines - if hasattr(dim, 'direction_cosines') - else _default_dir_cos[name]) + rot_mat[:, i] = ( + dim.direction_cosines + if hasattr(dim, 'direction_cosines') + else _default_dir_cos[name] + ) steps[i] = dim.step if hasattr(dim, 'step') else 1.0 starts[i] = dim.start if hasattr(dim, 'start') else 0.0 origin = np.dot(rot_mat, starts) @@ -116,7 +112,7 @@ def get_affine(self): return aff def _get_valid_range(self): - """ Return valid range for image data + """Return valid range for image data The valid range can come from the image 'valid_range' or image 'valid_min' and 'valid_max', or, failing that, from the @@ -128,25 +124,23 @@ def _get_valid_range(self): valid_range = self._image.valid_range except AttributeError: try: - valid_range = [self._image.valid_min, - self._image.valid_max] + valid_range = [self._image.valid_min, self._image.valid_max] except AttributeError: valid_range = [info.min, info.max] if valid_range[0] < info.min or valid_range[1] > info.max: - raise ValueError('Valid range outside input ' - 'data type range') + raise ValueError('Valid range outside input ' 'data type range') return np.asarray(valid_range, dtype=np.float64) def _get_scalar(self, var): - """ Get scalar value from NetCDF scalar """ + """Get scalar value from NetCDF scalar""" return var.getValue() def _get_array(self, var): - """ Get array from NetCDF array """ + """Get array from NetCDF array""" return var.data def _normalize(self, data, sliceobj=()): - """ Apply scaling to image data `data` already sliced with `sliceobj` + """Apply scaling to image data `data` already sliced with `sliceobj` https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#Pixel_values_and_real_values @@ -177,8 +171,7 @@ def _normalize(self, data, sliceobj=()): mx_dims = self._get_dimensions(image_max) mn_dims = self._get_dimensions(image_min) if mx_dims != mn_dims: - raise MincError('"image-max" and "image-min" do not have the same' - 'dimensions') + raise MincError('"image-max" and "image-min" do not have the same' 'dimensions') nscales = len(mx_dims) if nscales > 2: raise MincError('More than two scaling dimensions') @@ -202,19 +195,20 @@ def _normalize(self, data, sliceobj=()): i_slicer = sliceobj[:nscales_ax] # Fill slicer to broadcast against sliced data; add length 1 axis # for each axis except int axes (which are dropped by slicing) - broad_part = tuple(None for s in sliceobj[ax_inds[nscales]:] - if not isinstance(s, Integral)) + broad_part = tuple( + None for s in sliceobj[ax_inds[nscales] :] if not isinstance(s, Integral) + ) i_slicer += broad_part imax = self._get_array(image_max)[i_slicer] imin = self._get_array(image_min)[i_slicer] slope = (imax - imin) / (dmax - dmin) - inter = (imin - dmin * slope) + inter = imin - dmin 
* slope out_data *= slope out_data += inter return out_data def get_scaled_data(self, sliceobj=()): - """ Return scaled data for slice definition `sliceobj` + """Return scaled data for slice definition `sliceobj` Parameters ---------- @@ -236,7 +230,7 @@ def get_scaled_data(self, sliceobj=()): class MincImageArrayProxy: - """ MINC implementation of array proxy protocol + """MINC implementation of array proxy protocol The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. @@ -259,7 +253,7 @@ def is_proxy(self): return True def __array__(self, dtype=None): - """ Read data from file and apply scaling, casting to ``dtype`` + """Read data from file and apply scaling, casting to ``dtype`` If ``dtype`` is unspecified, the dtype is automatically determined. @@ -279,39 +273,39 @@ def __array__(self, dtype=None): return arr def __getitem__(self, sliceobj): - """ Read slice `sliceobj` of data from file """ + """Read slice `sliceobj` of data from file""" return self.minc_file.get_scaled_data(sliceobj) class MincHeader(SpatialHeader): - """ Class to contain header for MINC formats - """ + """Class to contain header for MINC formats""" + # We don't use the data layout - this just in case we do later data_layout = 'C' def data_to_fileobj(self, data, fileobj, rescale=True): - """ See Header class for an implementation we can't use """ + """See Header class for an implementation we can't use""" raise NotImplementedError def data_from_fileobj(self, fileobj): - """ See Header class for an implementation we can't use """ + """See Header class for an implementation we can't use""" raise NotImplementedError class Minc1Header(MincHeader): - @classmethod def may_contain_header(klass, binaryblock): return binaryblock[:4] == b'CDF\x01' class Minc1Image(SpatialImage): - """ Class for MINC1 format images + """Class for MINC1 format images The MINC1 image class uses the default header type, rather than a specific MINC header type - and reads the relevant information from the MINC file on load. """ + header_class = Minc1Header _meta_sniff_len = 4 valid_exts = ('.mnc',) diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 3dce425609..275a7799c8 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Preliminary MINC2 support +"""Preliminary MINC2 support Use with care; I haven't tested this against a wide range of MINC files. 
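The `_normalize` method reformatted above (and inherited by `Minc2File` below) maps stored integers in the data type's valid range `[dmin, dmax]` linearly onto the per-slice real range `[imin, imax]`, via the `slope` and `inter` lines shown. A standalone numeric sketch of that mapping, with invented ranges:

    import numpy as np

    dmin, dmax = 0.0, 255.0  # valid_range of the stored dtype (e.g. uint8)
    imin, imax = -1.0, 1.0   # per-slice image-min / image-max

    data = np.array([0.0, 128.0, 255.0])
    slope = (imax - imin) / (dmax - dmin)
    inter = imin - dmin * slope
    print(data * slope + inter)  # [-1.0, 0.0039..., 1.0]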
@@ -31,8 +31,7 @@ class Hdf5Bunch: - """ Make object for accessing attributes of variable - """ + """Make object for accessing attributes of variable""" def __init__(self, var): for name, value in var.attrs.items(): @@ -40,7 +39,7 @@ def __init__(self, var): class Minc2File(Minc1File): - """ Class to wrap MINC2 format file + """Class to wrap MINC2 format file Although it has some of the same methods as a ``Header``, we use this only when reading a MINC2 file, to pull out useful header @@ -61,8 +60,7 @@ def __init__(self, mincfile): for dim in self._dims: if dim.spacing != b'regular__': raise ValueError('Irregular spacing not supported') - self._spatial_dims = [name for name in self._dim_names - if name.endswith('space')] + self._spatial_dims = [name for name in self._dim_names if name.endswith('space')] self._image_max = image['image-max'] self._image_min = image['image-min'] @@ -77,7 +75,7 @@ def _get_dimensions(self, var): # The dimension name list must contain only as many entries # as the variable has dimensions. This reduces errors when an # unnecessary dimorder attribute is left behind. - return dimorder.split(',')[:len(var.shape)] + return dimorder.split(',')[: len(var.shape)] def get_data_dtype(self): return self._image.dtype @@ -86,7 +84,7 @@ def get_data_shape(self): return self._image.shape def _get_valid_range(self): - """ Return valid range for image data + """Return valid range for image data The valid range can come from the image 'valid_range' or failing that, from the data type range @@ -99,20 +97,19 @@ def _get_valid_range(self): valid_range = [info.min, info.max] else: if valid_range[0] < info.min or valid_range[1] > info.max: - raise ValueError('Valid range outside input ' - 'data type range') + raise ValueError('Valid range outside input ' 'data type range') return np.asarray(valid_range, dtype=np.float64) def _get_scalar(self, var): - """ Get scalar value from HDF5 scalar """ + """Get scalar value from HDF5 scalar""" return var[()] def _get_array(self, var): - """ Get array from HDF5 array """ + """Get array from HDF5 array""" return np.asanyarray(var) def get_scaled_data(self, sliceobj=()): - """ Return scaled data for slice definition `sliceobj` + """Return scaled data for slice definition `sliceobj` Parameters ---------- @@ -137,19 +134,19 @@ def get_scaled_data(self, sliceobj=()): class Minc2Header(MincHeader): - @classmethod def may_contain_header(klass, binaryblock): return binaryblock[:4] == b'\211HDF' class Minc2Image(Minc1Image): - """ Class for MINC2 images + """Class for MINC2 images The MINC2 image class uses the default header type, rather than a specific MINC header type - and reads the relevant information from the MINC file on load. 
""" + # MINC2 does not do compressed whole files _compressed_suffixes = () header_class = Minc2Header @@ -159,6 +156,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # Import of h5py might take awhile for MPI-enabled builds # So we are importing it here "on demand" import h5py + holder = file_map['image'] if holder.filename is None: raise MincError('MINC2 needs filename for load') diff --git a/nibabel/mriutils.py b/nibabel/mriutils.py index b0f3f6a86f..d993d26a21 100644 --- a/nibabel/mriutils.py +++ b/nibabel/mriutils.py @@ -43,9 +43,11 @@ def calculate_dwell_time(water_fat_shift, echo_train_length, field_strength): if values are out of range """ if field_strength < 0: - raise MRIError("Field strength should be positive") + raise MRIError('Field strength should be positive') if echo_train_length <= 0: - raise MRIError("Echo train length should be >= 1") - return ((echo_train_length - 1) * water_fat_shift / - (GYROMAGNETIC_RATIO * PROTON_WATER_FAT_SHIFT * - field_strength * (echo_train_length + 1))) + raise MRIError('Echo train length should be >= 1') + return ( + (echo_train_length - 1) + * water_fat_shift + / (GYROMAGNETIC_RATIO * PROTON_WATER_FAT_SHIFT * field_strength * (echo_train_length + 1)) + ) diff --git a/nibabel/nicom/__init__.py b/nibabel/nicom/__init__.py index 240779a019..3a389db172 100644 --- a/nibabel/nicom/__init__.py +++ b/nibabel/nicom/__init__.py @@ -21,8 +21,11 @@ """ import warnings -warnings.warn('The DICOM readers are highly experimental, unstable,' - ' and only work for Siemens time-series at the moment\n' - 'Please use with caution. We would be grateful for your ' - 'help in improving them', - UserWarning, stacklevel=2) +warnings.warn( + 'The DICOM readers are highly experimental, unstable,' + ' and only work for Siemens time-series at the moment\n' + 'Please use with caution. We would be grateful for your ' + 'help in improving them', + UserWarning, + stacklevel=2, +) diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index 22aa3c88e6..10471e586a 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -10,15 +10,16 @@ ASCCONV_RE = re.compile( r'### ASCCONV BEGIN((?:\s*[^=\s]+=[^=\s]+)*) ###\n(.*?)\n### ASCCONV END ###', - flags=re.M | re.S) + flags=re.M | re.S, +) class AscconvParseError(Exception): - """ Error parsing ascconv file """ + """Error parsing ascconv file""" class Atom: - """ Object to hold operation, object type and object identifier + """Object to hold operation, object type and object identifier An atom represents an element in an expression. For example:: @@ -55,11 +56,11 @@ def __init__(self, op, obj_type, obj_id): class NoValue: - """ Signals no value present """ + """Signals no value present""" def assign2atoms(assign_ast, default_class=int): - """ Parse single assignment ast from ascconv line into atoms + """Parse single assignment ast from ascconv line into atoms Parameters ---------- @@ -102,7 +103,7 @@ def assign2atoms(assign_ast, default_class=int): def _create_obj_in(atom, root): - """ Find / create object defined in `atom` in dict-like given by `root` + """Find / create object defined in `atom` in dict-like given by `root` Returns corresponding value if there is already a key matching `atom.obj_id` in `root`. 
@@ -122,7 +123,7 @@ def _create_obj_in(atom, root): def _create_subscript_in(atom, root): - """ Find / create and insert object defined by `atom` from list `root` + """Find / create and insert object defined by `atom` from list `root` The `atom` has an index, defined in ``atom.obj_id``. If `root` is long enough to contain this index, return the object at that index. Otherwise, @@ -142,7 +143,7 @@ def _create_subscript_in(atom, root): def obj_from_atoms(atoms, namespace): - """ Return object defined by list `atoms` in dict-like `namespace` + """Return object defined by list `atoms` in dict-like `namespace` Parameters ---------- @@ -167,8 +168,7 @@ def obj_from_atoms(atoms, namespace): else: root_obj = _create_subscript_in(el, root_obj) if not isinstance(root_obj, el.obj_type): - raise AscconvParseError( - f'Unexpected type for {el.obj_id} in {prev_root}') + raise AscconvParseError(f'Unexpected type for {el.obj_id} in {prev_root}') return prev_root, el.obj_id @@ -184,7 +184,7 @@ def _get_value(assign): def parse_ascconv(ascconv_str, str_delim='"'): - '''Parse the 'ASCCONV' format from `input_str`. + """Parse the 'ASCCONV' format from `input_str`. Parameters ---------- @@ -204,11 +204,11 @@ def parse_ascconv(ascconv_str, str_delim='"'): ------ AsconvParseError A line of the ASCCONV section could not be parsed. - ''' + """ attrs, content = ASCCONV_RE.match(ascconv_str).groups() attrs = OrderedDict((tuple(x.split('=')) for x in attrs.split())) # Normalize string start / end markers to something Python understands - content = content.replace(str_delim, '"""').replace("\\", "\\\\") + content = content.replace(str_delim, '"""').replace('\\', '\\\\') # Use Python's own parser to parse modified ASCCONV assignments tree = ast.parse(content) diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 7e465ff19a..376dcb5b5a 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -1,5 +1,4 @@ -""" CSA header reader from SPM spec - +"""CSA header reader from SPM spec """ import numpy as np @@ -30,7 +29,7 @@ class CSAReadError(CSAError): def get_csa_header(dcm_data, csa_type='image'): - """ Get CSA header information from DICOM header + """Get CSA header information from DICOM header Return None if the header does not contain CSA information of the specified `csa_type` @@ -72,7 +71,7 @@ def get_csa_header(dcm_data, csa_type='image'): def read(csa_str): - """ Read CSA header from string `csa_str` + """Read CSA header from string `csa_str` Parameters ---------- @@ -99,20 +98,22 @@ def read(csa_str): csa_dict['type'] = hdr_type csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I') if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS: - raise CSAReadError('Number of tags `t` should be ' - '0 < t <= %d. Instead found %d tags.' - % (MAX_CSA_ITEMS, csa_dict['n_tags'])) + raise CSAReadError( + 'Number of tags `t` should be ' + '0 < t <= %d. Instead found %d tags.' 
% (MAX_CSA_ITEMS, csa_dict['n_tags']) + ) for tag_no in range(csa_dict['n_tags']): - name, vm, vr, syngodt, n_items, last3 = \ - up_str.unpack('64si4s3i') + name, vm, vr, syngodt, n_items, last3 = up_str.unpack('64si4s3i') vr = nt_str(vr) name = nt_str(name) - tag = {'n_items': n_items, - 'vm': vm, # value multiplicity - 'vr': vr, # value representation - 'syngodt': syngodt, - 'last3': last3, - 'tag_no': tag_no} + tag = { + 'n_items': n_items, + 'vm': vm, # value multiplicity + 'vr': vr, # value representation + 'syngodt': syngodt, + 'last3': last3, + 'tag_no': tag_no, + } if vm == 0: n_values = n_items else: @@ -137,8 +138,7 @@ def read(csa_str): else: # CSA2 item_len = x1 if (ptr + item_len) > csa_len: - raise CSAReadError('Item is too long, ' - 'aborting read') + raise CSAReadError('Item is too long, ' 'aborting read') if item_no >= n_values: assert item_len == 0 continue @@ -155,7 +155,7 @@ def read(csa_str): # go to 4 byte boundary plus4 = item_len % 4 if plus4 != 0: - up_str.ptr += (4 - plus4) + up_str.ptr += 4 - plus4 tag['items'] = items csa_dict['tags'][name] = tag return csa_dict @@ -184,7 +184,7 @@ def get_vector(csa_dict, tag_name, n): def is_mosaic(csa_dict): - """ Return True if the data is of Mosaic type + """Return True if the data is of Mosaic type Parameters ---------- @@ -243,7 +243,7 @@ def get_ice_dims(csa_dict): def nt_str(s): - """ Strip string to first null + """Strip string to first null Parameters ---------- diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 56d7d56946..3f5293dcc3 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -1,11 +1,10 @@ - from os.path import join as pjoin import glob import numpy as np from .. import Nifti1Image -from .dicomwrappers import (wrapper_from_data, wrapper_from_file) +from .dicomwrappers import wrapper_from_data, wrapper_from_file class DicomReadError(Exception): @@ -16,7 +15,7 @@ class DicomReadError(Exception): def mosaic_to_nii(dcm_data): - """ Get Nifti file from Siemens + """Get Nifti file from Siemens Parameters ---------- @@ -37,15 +36,11 @@ def mosaic_to_nii(dcm_data): def read_mosaic_dwi_dir(dicom_path, globber='*.dcm', dicom_kwargs=None): - return read_mosaic_dir(dicom_path, - globber, - check_is_dwi=True, - dicom_kwargs=dicom_kwargs) + return read_mosaic_dir(dicom_path, globber, check_is_dwi=True, dicom_kwargs=dicom_kwargs) -def read_mosaic_dir(dicom_path, - globber='*.dcm', check_is_dwi=False, dicom_kwargs=None): - """ Read all Siemens mosaic DICOMs in directory, return arrays, params +def read_mosaic_dir(dicom_path, globber='*.dcm', check_is_dwi=False, dicom_kwargs=None): + """Read all Siemens mosaic DICOMs in directory, return arrays, params Parameters ---------- @@ -98,7 +93,8 @@ def read_mosaic_dir(dicom_path, raise DicomReadError( f'Could not find diffusion information reading file "{fname}"; ' 'is it possible this is not a _raw_ diffusion directory? ' - 'Could it be a processed dataset like ADC etc?') + 'Could it be a processed dataset like ADC etc?' 
+ ) b = np.nan g = np.ones((3,)) + np.nan else: @@ -107,14 +103,11 @@ def read_mosaic_dir(dicom_path, b_values.append(b) gradients.append(g) affine = np.dot(DPCS_TO_TAL, dcm_w.affine) - return (np.concatenate(arrays, -1), - affine, - np.array(b_values), - np.array(gradients)) + return (np.concatenate(arrays, -1), affine, np.array(b_values), np.array(gradients)) def slices_to_series(wrappers): - """ Sort sequence of slice wrappers into series + """Sort sequence of slice wrappers into series This follows the SPM model fairly closely @@ -169,17 +162,17 @@ def _instance_sorter(s): def _third_pass(wrappers): - """ What we do when there are not unique zs in a slice set """ + """What we do when there are not unique zs in a slice set""" inos = [s.instance_number for s in wrappers] - msg_fmt = ('Plausibly matching slices, but where some have ' - 'the same apparent slice location, and %s; ' - '- slices are probably unsortable') + msg_fmt = ( + 'Plausibly matching slices, but where some have ' + 'the same apparent slice location, and %s; ' + '- slices are probably unsortable' + ) if None in inos: - raise DicomReadError(msg_fmt % 'some or all slices with ' - 'missing InstanceNumber') + raise DicomReadError(msg_fmt % 'some or all slices with ' 'missing InstanceNumber') if len(set(inos)) < len(inos): - raise DicomReadError(msg_fmt % 'some or all slices with ' - 'the same InstanceNumber') + raise DicomReadError(msg_fmt % 'some or all slices with ' 'the same InstanceNumber') # sort by instance number wrappers.sort(key=_instance_sorter) # start loop, in which we start a new volume, each time we see a z diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 9f180a86a3..3c7268dbe0 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -1,4 +1,4 @@ -""" Classes to wrap DICOM objects and files +"""Classes to wrap DICOM objects and files The wrappers encapsulate the capabilities of the different DICOM formats. @@ -23,7 +23,7 @@ from ..openers import ImageOpener from ..onetime import auto_attr as one_time -pydicom = optional_package("pydicom")[0] +pydicom = optional_package('pydicom')[0] class WrapperError(Exception): @@ -35,7 +35,7 @@ class WrapperPrecisionError(WrapperError): def wrapper_from_file(file_like, *args, **kwargs): - r""" Create DICOM wrapper from `file_like` object + r"""Create DICOM wrapper from `file_like` object Parameters ---------- @@ -59,7 +59,7 @@ def wrapper_from_file(file_like, *args, **kwargs): def wrapper_from_data(dcm_data): - """ Create DICOM wrapper from DICOM data object + """Create DICOM wrapper from DICOM data object Parameters ---------- @@ -82,9 +82,11 @@ def wrapper_from_data(dcm_data): try: csa = csar.get_csa_header(dcm_data) except csar.CSAReadError as e: - warnings.warn('Error while attempting to read CSA header: ' + - str(e.args) + - '\n Ignoring Siemens private (CSA) header info.') + warnings.warn( + 'Error while attempting to read CSA header: ' + + str(e.args) + + '\n Ignoring Siemens private (CSA) header info.' 
+ ) csa = None if csa is None: return Wrapper(dcm_data) @@ -96,7 +98,7 @@ def wrapper_from_data(dcm_data): class Wrapper: - """ Class to wrap general DICOM files + """Class to wrap general DICOM files Methods: @@ -119,6 +121,7 @@ class Wrapper: * slice_indicator : float * series_signature : tuple """ + is_csa = False is_mosaic = False is_multiframe = False @@ -128,7 +131,7 @@ class Wrapper: b_vector = None def __init__(self, dcm_data): - """ Initialize wrapper + """Initialize wrapper Parameters ---------- @@ -141,8 +144,7 @@ def __init__(self, dcm_data): @one_time def image_shape(self): - """ The array shape as it will be returned by ``get_data()`` - """ + """The array shape as it will be returned by ``get_data()``""" shape = (self.get('Rows'), self.get('Columns')) if None in shape: return None @@ -150,7 +152,7 @@ def image_shape(self): @one_time def image_orient_patient(self): - """ Note that this is _not_ LR flipped """ + """Note that this is _not_ LR flipped""" iop = self.get('ImageOrientationPatient') if iop is None: return None @@ -168,7 +170,7 @@ def slice_normal(self): @one_time def rotation_matrix(self): - """ Return rotation matrix between array indices and mm + """Return rotation matrix between array indices and mm Note that we swap the two columns of the 'ImageOrientPatient' when we create the rotation matrix. This is takes into account @@ -190,14 +192,12 @@ def rotation_matrix(self): # motivated in ``doc/source/notebooks/ata_error.ipynb``, and from # discussion at https://github.com/nipy/nibabel/pull/156 if not np.allclose(np.eye(3), np.dot(R, R.T), atol=5e-5): - raise WrapperPrecisionError('Rotation matrix not nearly ' - 'orthogonal') + raise WrapperPrecisionError('Rotation matrix not nearly ' 'orthogonal') return R @one_time def voxel_sizes(self): - """ voxel sizes for array as returned by ``get_data()`` - """ + """voxel sizes for array as returned by ``get_data()``""" # pix space gives (row_spacing, column_spacing). That is, the # mm you move when moving from one row to the next, and the mm # you move when moving from one column to the next @@ -216,7 +216,7 @@ def voxel_sizes(self): @one_time def image_position(self): - """ Return position of first voxel in data block + """Return position of first voxel in data block Parameters ---------- @@ -235,7 +235,7 @@ def image_position(self): @one_time def slice_indicator(self): - """ A number that is higher for higher slices in Z + """A number that is higher for higher slices in Z Comparing this number between two adjacent slices should give a difference equal to the voxel size in Z. @@ -250,12 +250,12 @@ def slice_indicator(self): @one_time def instance_number(self): - """ Just because we use this a lot for sorting """ + """Just because we use this a lot for sorting""" return self.get('InstanceNumber') @one_time def series_signature(self): - """ Signature for matching slices into series + """Signature for matching slices into series We use `signature` in ``self.is_same_series(other)``. 
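Several of the properties above (`rotation_matrix`, `voxel_sizes`, `image_position`) only matter in combination: the `affine` property further down assembles them into the voxel-to-DICOM-patient transform. A minimal sketch of that assembly with invented values (identity orientation, 0.5 x 0.5 x 2 mm voxels):

    import numpy as np

    R = np.eye(3)                          # columns: row, column, slice-normal cosines
    vox = np.array([0.5, 0.5, 2.0])        # zooms along each column of R
    ipp = np.array([-80.0, -100.0, 30.0])  # ImagePositionPatient of the first voxel

    aff = np.eye(4)
    aff[:3, :3] = R * vox   # broadcasting scales each direction-cosine column
    aff[:3, 3] = ipp
    print(aff)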
@@ -270,11 +270,13 @@ def series_signature(self): # dictionary with value, comparison func tuple signature = {} eq = operator.eq - for key in ('SeriesInstanceUID', - 'SeriesNumber', - 'ImageType', - 'SequenceName', - 'EchoNumbers'): + for key in ( + 'SeriesInstanceUID', + 'SeriesNumber', + 'ImageType', + 'SequenceName', + 'EchoNumbers', + ): signature[key] = (self.get(key), eq) signature['image_shape'] = (self.image_shape, eq) signature['iop'] = (self.image_orient_patient, none_or_close) @@ -282,18 +284,18 @@ def series_signature(self): return signature def __getitem__(self, key): - """ Return values from DICOM object""" + """Return values from DICOM object""" if key not in self.dcm_data: raise KeyError(f'"{key}" not in self.dcm_data') return self.dcm_data.get(key) def get(self, key, default=None): - """ Get values from underlying dicom data """ + """Get values from underlying dicom data""" return self.dcm_data.get(key, default) @property def affine(self): - """ Mapping between voxel and DICOM coordinate system + """Mapping between voxel and DICOM coordinate system (4, 4) affine matrix giving transformation between voxels in data array and mm in the DICOM patient coordinate system. @@ -315,14 +317,14 @@ def affine(self): return aff def get_pixel_array(self): - """ Return unscaled pixel array from DICOM """ + """Return unscaled pixel array from DICOM""" data = self.dcm_data.get('pixel_array') if data is None: raise WrapperError('Cannot find data in DICOM') return data def get_data(self): - """ Get scaled image data from DICOMs + """Get scaled image data from DICOMs We return the data as DICOM understands it, first dimension is rows, second dimension is columns @@ -336,7 +338,7 @@ def get_data(self): return self._scale_data(self.get_pixel_array()) def is_same_series(self, other): - """ Return True if `other` appears to be in same series + """Return True if `other` appears to be in same series Parameters ---------- @@ -365,8 +367,7 @@ def is_same_series(self, other): if not func(v1, v2): return False # values present in one or the other but not both - for keys, sig in ((my_keys - your_keys, my_sig), - (your_keys - my_keys, your_sig)): + for keys, sig in ((my_keys - your_keys, my_sig), (your_keys - my_keys, your_sig)): for key in keys: v1, func = sig[key] if not func(v1, None): @@ -393,8 +394,7 @@ def _apply_scale_offset(self, data, scale, offset): @one_time def b_value(self): - """ Return b value for diffusion or None if not available - """ + """Return b value for diffusion or None if not available""" q_vec = self.q_vector if q_vec is None: return None @@ -402,8 +402,7 @@ def b_value(self): @one_time def b_vector(self): - """ Return b vector for diffusion or None if not available - """ + """Return b vector for diffusion or None if not available""" q_vec = self.q_vector if q_vec is None: return None @@ -446,6 +445,7 @@ class MultiframeWrapper(Wrapper): series_signature(self) get_data(self) """ + is_multiframe = True def __init__(self, dcm_data): @@ -464,11 +464,11 @@ def __init__(self, dcm_data): try: self.frames[0] except TypeError: - raise WrapperError("PerFrameFunctionalGroupsSequence is empty.") + raise WrapperError('PerFrameFunctionalGroupsSequence is empty.') try: self.shared = dcm_data.get('SharedFunctionalGroupsSequence')[0] except TypeError: - raise WrapperError("SharedFunctionalGroupsSequence is empty.") + raise WrapperError('SharedFunctionalGroupsSequence is empty.') self._shape = None @one_time @@ -501,7 +501,7 @@ def image_shape(self): """ rows, cols = self.get('Rows'), 
self.get('Columns') if None in (rows, cols): - raise WrapperError("Rows and/or Columns are empty.") + raise WrapperError('Rows and/or Columns are empty.') # Check number of frames first_frame = self.frames[0] @@ -512,35 +512,34 @@ def image_shape(self): # DWI image may include derived isotropic, ADC or trace volume try: self.frames = pydicom.Sequence( - frame for frame in self.frames if - frame.MRDiffusionSequence[0].DiffusionDirectionality - != 'ISOTROPIC' - ) + frame + for frame in self.frames + if frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC' + ) except IndexError: # Sequence tag is found but missing items! - raise WrapperError("Diffusion file missing information") + raise WrapperError('Diffusion file missing information') except AttributeError: # DiffusionDirectionality tag is not required pass else: if n_frames != len(self.frames): - warnings.warn("Derived images found and removed") + warnings.warn('Derived images found and removed') n_frames = len(self.frames) has_derived = True assert len(self.frames) == n_frames frame_indices = np.array( - [frame.FrameContentSequence[0].DimensionIndexValues - for frame in self.frames]) + [frame.FrameContentSequence[0].DimensionIndexValues for frame in self.frames] + ) # Check that there is only one multiframe stack index - stack_ids = set(frame.FrameContentSequence[0].StackID - for frame in self.frames) + stack_ids = set(frame.FrameContentSequence[0].StackID for frame in self.frames) if len(stack_ids) > 1: - raise WrapperError("File contains more than one StackID. " - "Cannot handle multi-stack files") + raise WrapperError( + 'File contains more than one StackID. ' 'Cannot handle multi-stack files' + ) # Determine if one of the dimension indices refers to the stack id - dim_seq = [dim.DimensionIndexPointer - for dim in self.get('DimensionIndexSequence')] + dim_seq = [dim.DimensionIndexPointer for dim in self.get('DimensionIndexSequence')] stackid_tag = pydicom.datadict.tag_for_keyword('StackID') # remove the stack id axis if present if stackid_tag in dim_seq: @@ -549,10 +548,11 @@ def image_shape(self): dim_seq.pop(stackid_dim_idx) if has_derived: # derived volume is included - derived_tag = pydicom.datadict.tag_for_keyword("DiffusionBValue") + derived_tag = pydicom.datadict.tag_for_keyword('DiffusionBValue') if derived_tag not in dim_seq: - raise WrapperError("Missing information, cannot remove indices " - "with confidence.") + raise WrapperError( + 'Missing information, cannot remove indices ' 'with confidence.' 
+ ) derived_dim_idx = dim_seq.index(derived_tag) frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) # account for the 2 additional dimensions (row and column) not included @@ -567,8 +567,7 @@ def image_shape(self): shape = (rows, cols) + tuple(ns_unique) n_vols = np.prod(shape[3:]) if n_frames != n_vols * shape[2]: - raise WrapperError("Calculated shape does not match number of " - "frames.") + raise WrapperError('Calculated shape does not match number of ' 'frames.') return tuple(shape) @one_time @@ -582,8 +581,7 @@ def image_orient_patient(self): try: iop = self.frames[0].PlaneOrientationSequence[0].ImageOrientationPatient except AttributeError: - raise WrapperError("Not enough information for " - "image_orient_patient") + raise WrapperError('Not enough information for ' 'image_orient_patient') if iop is None: return None iop = np.array(list(map(float, iop))) @@ -591,14 +589,14 @@ def image_orient_patient(self): @one_time def voxel_sizes(self): - """ Get i, j, k voxel sizes """ + """Get i, j, k voxel sizes""" try: pix_measures = self.shared.PixelMeasuresSequence[0] except AttributeError: try: pix_measures = self.frames[0].PixelMeasuresSequence[0] except AttributeError: - raise WrapperError("Not enough data for pixel spacing") + raise WrapperError('Not enough data for pixel spacing') pix_space = pix_measures.PixelSpacing try: zs = pix_measures.SliceThickness @@ -626,9 +624,7 @@ def image_position(self): def series_signature(self): signature = {} eq = operator.eq - for key in ('SeriesInstanceUID', - 'SeriesNumber', - 'ImageType'): + for key in ('SeriesInstanceUID', 'SeriesNumber', 'ImageType'): signature[key] = (self.get(key), eq) signature['image_shape'] = (self.image_shape, eq) signature['iop'] = (self.image_orient_patient, none_or_close) @@ -649,8 +645,7 @@ def get_data(self): return self._scale_data(data) def _scale_data(self, data): - pix_trans = getattr( - self.frames[0], 'PixelValueTransformationSequence', None) + pix_trans = getattr(self.frames[0], 'PixelValueTransformationSequence', None) if pix_trans is None: return super(MultiframeWrapper, self)._scale_data(data) scale = float(pix_trans[0].RescaleSlope) @@ -659,7 +654,7 @@ def _scale_data(self, data): class SiemensWrapper(Wrapper): - """ Wrapper for Siemens format DICOMs + """Wrapper for Siemens format DICOMs Adds attributes: @@ -667,10 +662,11 @@ class SiemensWrapper(Wrapper): * b_matrix : (3,3) array * q_vector : (3,) array """ + is_csa = True def __init__(self, dcm_data, csa_header=None): - """ Initialize Siemens wrapper + """Initialize Siemens wrapper The Siemens-specific information is in the `csa_header`, either passed in here, or read from the input `dcm_data`. 
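Once the StackID and any derived-volume columns are removed, the multiframe `image_shape` logic above reduces to counting unique DimensionIndexValues per remaining column. A toy version of that reduction (the frame indices are invented; the real wrapper reads them from each frame's FrameContentSequence):

    import numpy as np

    # (slice, volume) index pairs for 6 frames: 3 slices x 2 volumes
    frame_indices = np.array([[1, 1], [2, 1], [3, 1], [1, 2], [2, 2], [3, 2]])
    rows, cols = 64, 64

    ns_unique = [len(np.unique(col)) for col in frame_indices.T]
    print((rows, cols) + tuple(ns_unique))  # (64, 64, 3, 2)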
@@ -723,7 +719,7 @@ def slice_normal(self): @one_time def series_signature(self): - """ Add ICE dims from CSA header to signature """ + """Add ICE dims from CSA header to signature""" signature = super(SiemensWrapper, self).series_signature ice = csar.get_ice_dims(self.csa_header) if ice is not None: @@ -733,7 +729,7 @@ def series_signature(self): @one_time def b_matrix(self): - """ Get DWI B matrix referring to voxel space + """Get DWI B matrix referring to voxel space Parameters ---------- @@ -770,7 +766,7 @@ def b_matrix(self): @one_time def q_vector(self): - """ Get DWI q vector referring to voxel space + """Get DWI q vector referring to voxel space Parameters ---------- @@ -791,7 +787,7 @@ def q_vector(self): class MosaicWrapper(SiemensWrapper): - """ Class for Siemens mosaic format data + """Class for Siemens mosaic format data Mosaic format is a way of storing a 3D image in a 2D slice - and it's as simple as you'd imagine it would be - just storing the slices @@ -806,10 +802,11 @@ class MosaicWrapper(SiemensWrapper): * n_mosaic : int * mosaic_size : int """ + is_mosaic = True def __init__(self, dcm_data, csa_header=None, n_mosaic=None): - """ Initialize Siemens Mosaic wrapper + """Initialize Siemens Mosaic wrapper The Siemens-specific information is in the `csa_header`, either passed in here, or read from the input `dcm_data`. @@ -834,28 +831,28 @@ def __init__(self, dcm_data, csa_header=None, n_mosaic=None): except KeyError: pass if n_mosaic is None or n_mosaic == 0: - raise WrapperError('No valid mosaic number in CSA ' - 'header; is this really ' - 'Siemens mosiac data?') + raise WrapperError( + 'No valid mosaic number in CSA ' + 'header; is this really ' + 'Siemens mosiac data?' + ) self.n_mosaic = n_mosaic self.mosaic_size = int(np.ceil(np.sqrt(n_mosaic))) @one_time def image_shape(self): - """ Return image shape as returned by ``get_data()`` """ + """Return image shape as returned by ``get_data()``""" # reshape pixel slice array back from mosaic rows = self.get('Rows') cols = self.get('Columns') if None in (rows, cols): return None mosaic_size = self.mosaic_size - return (int(rows / mosaic_size), - int(cols / mosaic_size), - self.n_mosaic) + return (int(rows / mosaic_size), int(cols / mosaic_size), self.n_mosaic) @one_time def image_position(self): - """ Return position of first voxel in data block + """Return position of first voxel in data block Adjusts Siemens mosaic position vector for bug in mosaic format position. See ``dicom_mosaic`` in doc/theory for details. 
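The mosaic `get_data` hunk below keeps the same index gymnastics, which are easier to see on a toy array: the reshape splits each mosaic axis into (tile, within-tile) parts, and the transpose moves the tile axes last so they can be pooled into a slice axis. A sketch with an invented 2 x 2 mosaic of 3 x 3 slices, where tile t is filled with the value t:

    import numpy as np

    mosaic_size, n_slice_rows, n_slice_cols = 2, 3, 3
    tiles = [[np.full((3, 3), 2 * r + c) for c in range(2)] for r in range(2)]
    data = np.block(tiles)  # the 6x6 mosaic as stored in the DICOM

    v4 = data.reshape(mosaic_size, n_slice_rows, mosaic_size, n_slice_cols)
    v4 = v4.transpose((1, 3, 0, 2))  # slice dims first, tile dims last
    v3 = v4.reshape((n_slice_rows, n_slice_cols, mosaic_size**2))
    print(v3[0, 0])  # [0 1 2 3]: tile t became slice t
    # the wrapper then trims padding tiles with v3[..., :n_mosaic]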
@@ -891,7 +888,7 @@ def image_position(self): return ipp + np.dot(Q, vox_trans_fixes[:, None]).ravel() def get_data(self): - """ Get scaled image data from DICOMs + """Get scaled image data from DICOMs Resorts data block from mosaic to 3D @@ -925,10 +922,9 @@ def get_data(self): raise WrapperError('No valid information for image shape') n_slice_rows, n_slice_cols, n_mosaic = shape n_slab_rows = self.mosaic_size - n_blocks = n_slab_rows ** 2 + n_blocks = n_slab_rows**2 data = self.get_pixel_array() - v4 = data.reshape(n_slab_rows, n_slice_rows, - n_slab_rows, n_slice_cols) + v4 = data.reshape(n_slab_rows, n_slice_rows, n_slab_rows, n_slice_cols) # move the mosaic dims to the end v4 = v4.transpose((1, 3, 0, 2)) # pool mosaic-generated dims @@ -939,7 +935,7 @@ def get_data(self): def none_or_close(val1, val2, rtol=1e-5, atol=1e-6): - """ Match if `val1` and `val2` are both None, or are close + """Match if `val1` and `val2` are both None, or are close Parameters ---------- diff --git a/nibabel/nicom/dwiparams.py b/nibabel/nicom/dwiparams.py index 62b28cb7e3..cb0e501202 100644 --- a/nibabel/nicom/dwiparams.py +++ b/nibabel/nicom/dwiparams.py @@ -1,4 +1,4 @@ -""" Process diffusion imaging parameters +"""Process diffusion imaging parameters * ``q`` is a vector in Q space * ``b`` is a b value @@ -17,14 +17,13 @@ ``q_est`` is the closest q vector equivalent to the B matrix, then: B ~ (q_est . q_est.T) / norm(q_est) - """ import numpy as np import numpy.linalg as npl def B2q(B, tol=None): - """ Estimate q vector from input B matrix `B` + """Estimate q vector from input B matrix `B` We require that the input `B` is symmetric positive definite. @@ -68,7 +67,7 @@ def B2q(B, tol=None): def nearest_pos_semi_def(B): - """ Least squares positive semi-definite tensor estimation + """Least squares positive semi-definite tensor estimation Reference: Niethammer M, San Jose Estepar R, Bouix S, Shenton M, Westin CF. On diffusion tensor estimation. 
Conf Proc IEEE Eng Med @@ -106,7 +105,7 @@ def nearest_pos_semi_def(B): lam1a, lam2a, lam3a = vals scalers = np.zeros((3,)) if cardneg == 2: - b112 = np.max([0, lam1a + (lam2a + lam3a) / 3.]) + b112 = np.max([0, lam1a + (lam2a + lam3a) / 3.0]) scalers[0] = b112 elif cardneg == 1: lam1b = lam1a + 0.25 * lam3a @@ -115,10 +114,10 @@ def nearest_pos_semi_def(B): scalers[:2] = lam1b, lam2b else: # one of the lam1b, lam2b is < 0 if lam2b < 0: - b111 = np.max([0, lam1a + (lam2a + lam3a) / 3.]) + b111 = np.max([0, lam1a + (lam2a + lam3a) / 3.0]) scalers[0] = b111 if lam1b < 0: - b221 = np.max([0, lam2a + (lam1a + lam3a) / 3.]) + b221 = np.max([0, lam2a + (lam1a + lam3a) / 3.0]) scalers[1] = b221 # resort the scalers to match the original vecs scalers = scalers[np.argsort(inds)] @@ -126,7 +125,7 @@ def nearest_pos_semi_def(B): def q2bg(q_vector, tol=1e-5): - """ Return b value and q unit vector from q vector `q_vector` + """Return b value and q unit vector from q vector `q_vector` Parameters ---------- @@ -155,5 +154,5 @@ def q2bg(q_vector, tol=1e-5): q_vec = np.asarray(q_vector) norm = np.sqrt(np.sum(q_vec * q_vec)) if norm < tol: - return (0., np.zeros((3,))) + return (0.0, np.zeros((3,))) return norm, q_vec / norm diff --git a/nibabel/nicom/structreader.py b/nibabel/nicom/structreader.py index eb714804f1..086a463d2e 100644 --- a/nibabel/nicom/structreader.py +++ b/nibabel/nicom/structreader.py @@ -1,4 +1,4 @@ -""" Stream-like reader for packed data """ +"""Stream-like reader for packed data""" from struct import Struct @@ -6,7 +6,7 @@ class Unpacker: - """ Class to unpack values from buffer object + """Class to unpack values from buffer object The buffer object is usually a string. Caches compiled :mod:`struct` format strings so that repeated unpacking with the same format @@ -29,7 +29,7 @@ class Unpacker: """ def __init__(self, buf, ptr=0, endian=None): - """ Initialize unpacker + """Initialize unpacker Parameters ---------- @@ -50,7 +50,7 @@ def __init__(self, buf, ptr=0, endian=None): self._cache = {} def unpack(self, fmt): - """ Unpack values from contained buffer + """Unpack values from contained buffer Unpacks values from ``self.buf`` and updates ``self.ptr`` to the position after the read data. @@ -89,7 +89,7 @@ def unpack(self, fmt): return values def read(self, n_bytes=-1): - """ Return byte string of length `n_bytes` at current position + """Return byte string of length `n_bytes` at current position Returns sub-string from ``self.buf`` and updates ``self.ptr`` to the position after the read data. diff --git a/nibabel/nicom/tests/__init__.py b/nibabel/nicom/tests/__init__.py index 75f5dbc5ac..4a7ea3b284 100644 --- a/nibabel/nicom/tests/__init__.py +++ b/nibabel/nicom/tests/__init__.py @@ -1,6 +1,6 @@ import unittest from nibabel.optpkg import optional_package -pydicom, have_dicom, _ = optional_package("pydicom") +pydicom, have_dicom, _ = optional_package('pydicom') -dicom_test = unittest.skipUnless(have_dicom, "Could not import pydicom") +dicom_test = unittest.skipUnless(have_dicom, 'Could not import pydicom') diff --git a/nibabel/nicom/tests/data_pkgs.py b/nibabel/nicom/tests/data_pkgs.py index 2424666a72..e95478ef90 100644 --- a/nibabel/nicom/tests/data_pkgs.py +++ b/nibabel/nicom/tests/data_pkgs.py @@ -1,16 +1,10 @@ -""" Data packages for DICOM testing """ +"""Data packages for DICOM testing""" from ... 
import data as nibd -PUBLIC_PKG_DEF = dict( - relpath='nipy/dicom/public', - name='nipy-dicom-public', - version='0.1') +PUBLIC_PKG_DEF = dict(relpath='nipy/dicom/public', name='nipy-dicom-public', version='0.1') -PRIVATE_PKG_DEF = dict( - relpath='nipy/dicom/private', - name='nipy-dicom-private', - version='0.1') +PRIVATE_PKG_DEF = dict(relpath='nipy/dicom/private', name='nipy-dicom-private', version='0.1') PUBLIC_DS = nibd.datasource_or_bomber(PUBLIC_PKG_DEF) diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index a1efd7fa29..6415c2725e 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -1,4 +1,4 @@ -""" Testing Siemens "ASCCONV" parser +"""Testing Siemens "ASCCONV" parser """ from os.path import join as pjoin, dirname @@ -22,12 +22,17 @@ def test_ascconv_parse(): assert len(ascconv_dict) == 72 assert ascconv_dict['tProtocolName'] == 'CBU+AF8-DTI+AF8-64D+AF8-1A' assert ascconv_dict['ucScanRegionPosValid'] == 1 - assert_array_almost_equal(ascconv_dict['sProtConsistencyInfo']['flNominalB0'], - 2.89362) + assert_array_almost_equal(ascconv_dict['sProtConsistencyInfo']['flNominalB0'], 2.89362) assert ascconv_dict['sProtConsistencyInfo']['flGMax'] == 26 - assert (list(ascconv_dict['sSliceArray'].keys()) == - ['asSlice', 'anAsc', 'anPos', 'lSize', 'lConc', 'ucMode', - 'sTSat']) + assert list(ascconv_dict['sSliceArray'].keys()) == [ + 'asSlice', + 'anAsc', + 'anPos', + 'lSize', + 'lConc', + 'ucMode', + 'sTSat', + ] slice_arr = ascconv_dict['sSliceArray'] as_slice = slice_arr['asSlice'] assert_array_equal([e['dPhaseFOV'] for e in as_slice], 230) @@ -42,8 +47,7 @@ def test_ascconv_parse(): # This lower-level list does start indexing at 0 assert len(as_list) == 12 for i, el in enumerate(as_list): - assert (list(el.keys()) == - ['sCoilElementID', 'lElementSelected', 'lRxChannelConnected']) + assert list(el.keys()) == ['sCoilElementID', 'lElementSelected', 'lRxChannelConnected'] assert el['lElementSelected'] == 1 assert el['lRxChannelConnected'] == i + 1 # Test negative number @@ -51,11 +55,13 @@ def test_ascconv_parse(): def test_ascconv_w_attrs(): - in_str = ("### ASCCONV BEGIN object=MrProtDataImpl@MrProtocolData " - "version=41340006 " - "converter=%MEASCONST%/ConverterList/Prot_Converter.txt ###\n" - "test = \"hello\"\n" - "### ASCCONV END ###") + in_str = ( + '### ASCCONV BEGIN object=MrProtDataImpl@MrProtocolData ' + 'version=41340006 ' + 'converter=%MEASCONST%/ConverterList/Prot_Converter.txt ###\n' + 'test = "hello"\n' + '### ASCCONV END ###' + ) ascconv_dict, attrs = ascconv.parse_ascconv(in_str, '""') assert attrs['object'] == 'MrProtDataImpl@MrProtocolData' assert attrs['version'] == '41340006' diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 912e98fe18..1dfe348c4b 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -1,4 +1,4 @@ -""" Testing Siemens CSA header reader +"""Testing Siemens CSA header reader """ import sys from os.path import join as pjoin @@ -114,12 +114,9 @@ def test_csa_params(): def test_ice_dims(): - ex_dims0 = ['X', '1', '1', '1', '1', '1', '1', - '48', '1', '1', '1', '1', '201'] - ex_dims1 = ['X', '1', '1', '1', '2', '1', '1', - '48', '1', '1', '1', '1', '201'] - for csa_str, ex_dims in ((CSA2_B0, ex_dims0), - (CSA2_B1000, ex_dims1)): + ex_dims0 = ['X', '1', '1', '1', '1', '1', '1', '48', '1', '1', '1', '1', '201'] + ex_dims1 = ['X', '1', '1', '1', '2', '1', '1', '48', '1', '1', '1', '1', '201'] + 
for csa_str, ex_dims in ((CSA2_B0, ex_dims0), (CSA2_B1000, ex_dims1)): csa_info = csa.read(csa_str) assert csa.get_ice_dims(csa_info) == ex_dims assert csa.get_ice_dims({}) is None diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index b1ae9edae9..dba29b6503 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -1,5 +1,4 @@ -""" Testing reading DICOM files - +"""Testing reading DICOM files """ from os.path import join as pjoin @@ -13,7 +12,7 @@ import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal -pydicom, _, setup_module = optional_package("pydicom") +pydicom, _, setup_module = optional_package('pydicom') def test_read_dwi(): @@ -24,8 +23,7 @@ def test_read_dwi(): def test_read_dwis(): - data, aff, bs, gs = didr.read_mosaic_dwi_dir(IO_DATA_PATH, - 'siemens_dwi_*.dcm.gz') + data, aff, bs, gs = didr.read_mosaic_dwi_dir(IO_DATA_PATH, 'siemens_dwi_*.dcm.gz') assert data.ndim == 4 assert_array_almost_equal(aff, EXPECTED_AFFINE) assert_array_almost_equal(bs, (0, EXPECTED_PARAMS[0])) @@ -41,10 +39,7 @@ def test_passing_kwds(): for func in (didr.read_mosaic_dwi_dir, didr.read_mosaic_dir): data, aff, bs, gs = func(IO_DATA_PATH, dwi_glob) # This should not raise an error - data2, aff2, bs2, gs2 = func( - IO_DATA_PATH, - dwi_glob, - dicom_kwargs=dict(force=True)) + data2, aff2, bs2, gs2 = func(IO_DATA_PATH, dwi_glob, dicom_kwargs=dict(force=True)) assert_array_equal(data, data2) # This should raise an error in pydicom.dicomio.read_file with pytest.raises(TypeError): @@ -59,9 +54,8 @@ def test_passing_kwds(): def test_slices_to_series(): - dicom_files = (pjoin(IO_DATA_PATH, "%d.dcm" % i) for i in range(2)) + dicom_files = (pjoin(IO_DATA_PATH, '%d.dcm' % i) for i in range(2)) wrappers = [didr.wrapper_from_file(f) for f in dicom_files] series = didr.slices_to_series(wrappers) assert len(series) == 1 assert len(series[0]) == 2 - diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index d65afc6d27..3dd1665c3f 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -1,4 +1,4 @@ -""" Testing DICOM wrappers +"""Testing DICOM wrappers """ from os.path import join as pjoin, dirname @@ -34,26 +34,25 @@ DATA_FILE_DEC_RSCL = pjoin(IO_DATA_PATH, 'decimal_rescale.dcm') DATA_FILE_4D = pjoin(IO_DATA_PATH, '4d_multiframe_test.dcm') DATA_FILE_EMPTY_ST = pjoin(IO_DATA_PATH, 'slicethickness_empty_string.dcm') -DATA_FILE_4D_DERIVED = pjoin(get_nibabel_data(), 'nitest-dicom', - '4d_multiframe_with_derived.dcm') -DATA_FILE_CT = pjoin(get_nibabel_data(), 'nitest-dicom', - 'siemens_ct_header_csa.dcm') +DATA_FILE_4D_DERIVED = pjoin(get_nibabel_data(), 'nitest-dicom', '4d_multiframe_with_derived.dcm') +DATA_FILE_CT = pjoin(get_nibabel_data(), 'nitest-dicom', 'siemens_ct_header_csa.dcm') # This affine from our converted image was shown to match our image spatially # with an image from SPM DICOM conversion. We checked the matching with SPM # check reg. We have flipped the first and second rows to allow for rows, cols # transpose in current return compared to original case. EXPECTED_AFFINE = np.array( # do this for philips? 
- [[-1.796875, 0, 0, 115], - [0, -1.79684984, -0.01570896, 135.028779], - [0, -0.00940843750, 2.99995887, -78.710481], - [0, 0, 0, 1]])[:, [1, 0, 2, 3]] + [ + [-1.796875, 0, 0, 115], + [0, -1.79684984, -0.01570896, 135.028779], + [0, -0.00940843750, 2.99995887, -78.710481], + [0, 0, 0, 1], + ] +)[:, [1, 0, 2, 3]] # from Guys and Matthew's SPM code, undoing SPM's Y flip, and swapping first two # values in vector, to account for data rows, cols difference. -EXPECTED_PARAMS = [992.05050247, (0.00507649, - 0.99997450, - -0.005023611)] +EXPECTED_PARAMS = [992.05050247, (0.00507649, 0.99997450, -0.005023611)] @dicom_test @@ -62,11 +61,14 @@ def test_wrappers(): # first with empty or minimal data multi_minimal = { 'PerFrameFunctionalGroupsSequence': [None], - 'SharedFunctionalGroupsSequence': [None]} - for maker, args in ((didw.Wrapper, ({},)), - (didw.SiemensWrapper, ({},)), - (didw.MosaicWrapper, ({}, None, 10)), - (didw.MultiframeWrapper, (multi_minimal,))): + 'SharedFunctionalGroupsSequence': [None], + } + for maker, args in ( + (didw.Wrapper, ({},)), + (didw.SiemensWrapper, ({},)), + (didw.MosaicWrapper, ({}, None, 10)), + (didw.MultiframeWrapper, (multi_minimal,)), + ): dw = maker(*args) assert dw.get('InstanceNumber') is None assert dw.get('AcquisitionNumber') is None @@ -83,11 +85,7 @@ def test_wrappers(): assert not dw.is_mosaic assert dw.b_matrix is None assert dw.q_vector is None - for maker in (didw.wrapper_from_data, - didw.Wrapper, - didw.SiemensWrapper, - didw.MosaicWrapper - ): + for maker in (didw.wrapper_from_data, didw.Wrapper, didw.SiemensWrapper, didw.MosaicWrapper): dw = maker(DATA) assert dw.get('InstanceNumber') == 2 assert dw.get('AcquisitionNumber') == 2 @@ -117,6 +115,7 @@ def test_get_from_wrapper(): class FakeData(dict): pass + d = FakeData() d.some_key = 'another bit of data' dw = didw.Wrapper(d) @@ -124,9 +123,9 @@ class FakeData(dict): # Check get defers to dcm_data get class FakeData2: - def get(self, key, default): return 1 + d = FakeData2() d.some_key = 'another bit of data' dw = didw.Wrapper(d) @@ -136,18 +135,14 @@ def get(self, key, default): @dicom_test def test_wrapper_from_data(): # test wrapper from data, wrapper from file - for dw in (didw.wrapper_from_data(DATA), - didw.wrapper_from_file(DATA_FILE)): + for dw in (didw.wrapper_from_data(DATA), didw.wrapper_from_file(DATA_FILE)): assert dw.get('InstanceNumber') == 2 assert dw.get('AcquisitionNumber') == 2 with pytest.raises(KeyError): dw['not an item'] assert dw.is_mosaic - assert_array_almost_equal( - np.dot(didr.DPCS_TO_TAL, dw.affine), - EXPECTED_AFFINE) - for dw in (didw.wrapper_from_data(DATA_PHILIPS), - didw.wrapper_from_file(DATA_FILE_PHILIPS)): + assert_array_almost_equal(np.dot(didr.DPCS_TO_TAL, dw.affine), EXPECTED_AFFINE) + for dw in (didw.wrapper_from_data(DATA_PHILIPS), didw.wrapper_from_file(DATA_FILE_PHILIPS)): assert dw.get('InstanceNumber') == 1 assert dw.get('AcquisitionNumber') == 3 with pytest.raises(KeyError): @@ -216,13 +211,13 @@ def test_q_vector_etc(): assert dw.b_vector is None for pos in range(3): q_vec = np.zeros((3,)) - q_vec[pos] = 10. + q_vec[pos] = 10.0 # Reset wrapped dicom to refresh one_time property dw = didw.Wrapper(DATA) dw.q_vector = q_vec assert_array_equal(dw.q_vector, q_vec) assert dw.b_value == 10 - assert_array_equal(dw.b_vector, q_vec / 10.) 
+        assert_array_equal(dw.b_vector, q_vec / 10.0)
     # Reset wrapped dicom to refresh one_time property
     dw = didw.Wrapper(DATA)
     dw.q_vector = np.array([0, 0, 1e-6])
@@ -269,6 +264,7 @@ def test_vol_matching():

     class C:
         series_signature = {}
+
     assert dw_empty.is_same_series(C())

     # make the Philips wrapper, check it compares True against itself
@@ -333,9 +329,7 @@ def test_rotation_matrix():
     assert_array_equal(dw.rotation_matrix, np.eye(3))
     d['ImageOrientationPatient'] = [1, 0, 0, 0, 1, 0]
     dw = didw.wrapper_from_data(d)
-    assert_array_equal(dw.rotation_matrix, [[0, 1, 0],
-                                            [1, 0, 0],
-                                            [0, 0, -1]])
+    assert_array_equal(dw.rotation_matrix, [[0, 1, 0], [1, 0, 0], [0, 0, -1]])


 @dicom_test
@@ -354,7 +348,7 @@ def test_assert_parallel():
     # Test that we get an AssertionError if the cross product and the CSA
     # slice normal are not parallel
     dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM)
-    dw.image_orient_patient = np.c_[[1., 0., 0.], [0., 1., 0.]]
+    dw.image_orient_patient = np.c_[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
     with pytest.raises(AssertionError):
         dw.slice_normal

@@ -368,7 +362,7 @@ def test_decimal_rescale():


 def fake_frames(seq_name, field_name, value_seq):
-    """ Make fake frames for multiframe testing
+    """Make fake frames for multiframe testing

     Parameters
     ----------
@@ -385,8 +379,10 @@
         each element in list is obj.<seq_name>[0].<field_name> = value_seq[n]
         for n in range(N)
     """
+
     class Fake:
         pass
+
     frames = []
     for value in value_seq:
         fake_frame = Fake()
@@ -398,7 +394,7 @@ class Fake:


 def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None):
-    """ Make a fake dictionary of data that ``image_shape`` is dependent on.
+    """Make a fake dictionary of data that ``image_shape`` is dependent on.

     Parameters
     ----------
@@ -409,18 +405,22 @@ def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None):
     sid_dim : int
         the index of the column in 'div_seq' to use as 'sid_seq'
     """
+
     class DimIdxSeqElem:
         def __init__(self, dip=(0, 0), fgp=None):
             self.DimensionIndexPointer = dip
             if fgp is not None:
                 self.FunctionalGroupPointer = fgp
+
     class FrmContSeqElem:
         def __init__(self, div, sid):
             self.DimensionIndexValues = div
             self.StackID = sid
+
     class PerFrmFuncGrpSeqElem:
         def __init__(self, div, sid):
             self.FrameContentSequence = [FrmContSeqElem(div, sid)]
+
     # if no StackID values passed in then use the values at index 'sid_dim' in
     # the value for DimensionIndexValues for it
     if sid_seq is None:
@@ -436,11 +436,12 @@ def __init__(self, div, sid):
         fcs_tag = pydicom.datadict.tag_for_keyword('FrameContentSequence')
         dim_idx_seq[sid_dim] = DimIdxSeqElem(sid_tag, fcs_tag)
     # create the PerFrameFunctionalGroupsSequence
-    frames = [PerFrmFuncGrpSeqElem(div, sid)
-              for div, sid in zip(div_seq, sid_seq)]
-    return {'NumberOfFrames' : num_of_frames,
-            'DimensionIndexSequence' : dim_idx_seq,
-            'PerFrameFunctionalGroupsSequence' : frames}
+    frames = [PerFrmFuncGrpSeqElem(div, sid) for div, sid in zip(div_seq, sid_seq)]
+    return {
+        'NumberOfFrames': num_of_frames,
+        'DimensionIndexSequence': dim_idx_seq,
+        'PerFrameFunctionalGroupsSequence': frames,
+    }


 class TestMultiFrameWrapper(TestCase):
     MINIMAL_MF = {
         # Minimal contents of dcm_data for this wrapper
         'PerFrameFunctionalGroupsSequence': [None],
-        'SharedFunctionalGroupsSequence': [None]}
+        'SharedFunctionalGroupsSequence': [None],
+    }
     WRAPCLASS = didw.MultiframeWrapper

     @dicom_test
@@ -485,13 +487,11 @@ def test_shape(self):
         with pytest.raises(didw.WrapperError):
             MFW(fake_mf).image_shape
         # Make
some fake frame data for 4D when StackID index is 0 - div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), - (1, 1, 3), (1, 2, 3)) + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) assert MFW(fake_mf).image_shape == (32, 64, 2, 3) # Check stack number matching for 4D when StackID index is 0 - div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), - (1, 1, 3), (2, 2, 3)) + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (2, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) with pytest.raises(didw.WrapperError): MFW(fake_mf).image_shape @@ -535,8 +535,7 @@ def test_shape(self): with pytest.raises(didw.WrapperError): MFW(fake_mf).image_shape # Make some fake frame data for 4D when StackID index is 1 - div_seq = ((1, 1, 1), (2, 1, 1), (1, 1, 2), (2, 1, 2), - (1, 1, 3), (2, 1, 3)) + div_seq = ((1, 1, 1), (2, 1, 1), (1, 1, 2), (2, 1, 2), (1, 1, 3), (2, 1, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) assert MFW(fake_mf).image_shape == (32, 64, 2, 3) @@ -548,18 +547,16 @@ def test_iop(self): with pytest.raises(didw.WrapperError): dw.image_orient_patient # Make a fake frame - fake_frame = fake_frames('PlaneOrientationSequence', - 'ImageOrientationPatient', - [[0, 1, 0, 1, 0, 0]])[0] + fake_frame = fake_frames( + 'PlaneOrientationSequence', 'ImageOrientationPatient', [[0, 1, 0, 1, 0, 0]] + )[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] - assert_array_equal(MFW(fake_mf).image_orient_patient, - [[0, 1], [1, 0], [0, 0]]) + assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) fake_mf['SharedFunctionalGroupsSequence'] = [None] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_orient_patient fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] - assert_array_equal(MFW(fake_mf).image_orient_patient, - [[0, 1], [1, 0], [0, 0]]) + assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) def test_voxel_sizes(self): # Test voxel size calculation @@ -569,9 +566,7 @@ def test_voxel_sizes(self): with pytest.raises(didw.WrapperError): dw.voxel_sizes # Make a fake frame - fake_frame = fake_frames('PixelMeasuresSequence', - 'PixelSpacing', - [[2.1, 3.2]])[0] + fake_frame = fake_frames('PixelMeasuresSequence', 'PixelSpacing', [[2.1, 3.2]])[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] # Still not enough, we lack information for slice distances with pytest.raises(didw.WrapperError): @@ -593,9 +588,9 @@ def test_voxel_sizes(self): fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Decimals in any field are OK - fake_frame = fake_frames('PixelMeasuresSequence', - 'PixelSpacing', - [[Decimal('2.1'), Decimal('3.2')]])[0] + fake_frame = fake_frames( + 'PixelMeasuresSequence', 'PixelSpacing', [[Decimal('2.1'), Decimal('3.2')]] + )[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] fake_mf['SpacingBetweenSlices'] = Decimal('4.3') assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 4.3]) @@ -610,9 +605,9 @@ def test_image_position(self): with pytest.raises(didw.WrapperError): dw.image_position # Make a fake frame - fake_frame = fake_frames('PlanePositionSequence', - 'ImagePositionPatient', - [[-2.0, 3., 7]])[0] + fake_frame = fake_frames( + 'PlanePositionSequence', 'ImagePositionPatient', [[-2.0, 3.0, 7]] + )[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).image_position, 
[-2, 3, 7]) fake_mf['SharedFunctionalGroupsSequence'] = [None] @@ -622,7 +617,8 @@ def test_image_position(self): assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) # Check lists of Decimals work fake_frame.PlanePositionSequence[0].ImagePositionPatient = [ - Decimal(str(v)) for v in [-2, 3, 7]] + Decimal(str(v)) for v in [-2, 3, 7] + ] assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) assert MFW(fake_mf).image_position.dtype == float @@ -656,14 +652,14 @@ def test_data_derived_shape(self): # Test 4D diffusion data with an additional trace volume included # Excludes the trace volume and generates the correct shape dw = didw.wrapper_from_file(DATA_FILE_4D_DERIVED) - with pytest.warns(UserWarning, match="Derived images found and removed"): + with pytest.warns(UserWarning, match='Derived images found and removed'): assert dw.image_shape == (96, 96, 60, 33) @dicom_test @needs_nibabel_data('nitest-dicom') def test_data_unreadable_private_headers(self): # Test CT image with unreadable CSA tags - with pytest.warns(UserWarning, match="Error while attempting to read CSA header"): + with pytest.warns(UserWarning, match='Error while attempting to read CSA header'): dw = didw.wrapper_from_file(DATA_FILE_CT) assert dw.image_shape == (512, 571) @@ -724,13 +720,13 @@ def test_data_fake(self): [1, 4, 1, 2], [1, 2, 1, 2], [1, 3, 1, 2], - [1, 1, 1, 2]] + [1, 1, 1, 2], + ] fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0)) shape = (2, 3, 4, 2, 2) data = np.arange(np.prod(shape)).reshape(shape) sorted_data = data.reshape(shape[:2] + (-1,), order='F') - order = [11, 9, 10, 8, 3, 1, 2, 0, - 15, 13, 14, 12, 7, 5, 6, 4] + order = [11, 9, 10, 8, 3, 1, 2, 0, 15, 13, 14, 12, 7, 5, 6, 4] sorted_data = sorted_data[..., np.argsort(order)] fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) @@ -745,9 +741,7 @@ def test__scale_data(self): fake_mf['RescaleSlope'] = 2.0 fake_mf['RescaleIntercept'] = -1.0 assert_array_equal(data * 2 - 1, dw._scale_data(data)) - fake_frame = fake_frames('PixelValueTransformationSequence', - 'RescaleSlope', - [3.0])[0] + fake_frame = fake_frames('PixelValueTransformationSequence', 'RescaleSlope', [3.0])[0] fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] # Lacking RescaleIntercept -> Error dw = MFW(fake_mf) diff --git a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py index d0d20e574a..8a869c01db 100644 --- a/nibabel/nicom/tests/test_dwiparams.py +++ b/nibabel/nicom/tests/test_dwiparams.py @@ -1,5 +1,4 @@ -""" Testing diffusion parameter processing - +"""Testing diffusion parameter processing """ import numpy as np @@ -8,7 +7,7 @@ import pytest -from numpy.testing import (assert_array_almost_equal, assert_equal as np_assert_equal) +from numpy.testing import assert_array_almost_equal, assert_equal as np_assert_equal def test_b2q(): @@ -31,7 +30,7 @@ def test_b2q(): # no error if we up the tolerance q = B2q(B, tol=1) # Less massive negativity, dropping tol - B = np.diag([-1e-14, 10., 1]) + B = np.diag([-1e-14, 10.0, 1]) with pytest.raises(ValueError): B2q(B) assert_array_almost_equal(B2q(B, tol=5e-13), [0, 10, 0]) @@ -46,8 +45,8 @@ def test_q2bg(): # Conversion of q vector to b value and unit vector for pos in range(3): q_vec = np.zeros((3,)) - q_vec[pos] = 10. 
-        np_assert_equal(q2bg(q_vec), (10, q_vec / 10.))
+        q_vec[pos] = 10.0
+        np_assert_equal(q2bg(q_vec), (10, q_vec / 10.0))
     # Also - check array-like
     q_vec = [0, 1e-6, 0]
     np_assert_equal(q2bg(q_vec), (0, 0))
diff --git a/nibabel/nicom/tests/test_structreader.py b/nibabel/nicom/tests/test_structreader.py
index 6e58931559..c7815cd6fb 100644
--- a/nibabel/nicom/tests/test_structreader.py
+++ b/nibabel/nicom/tests/test_structreader.py
@@ -1,4 +1,4 @@
-""" Testing Siemens CSA header reader
+"""Testing Siemens CSA header reader
 """
 import sys
 import struct
@@ -8,8 +8,8 @@

 def test_unpacker():
     s = b'1234\x00\x01'
-    le_int, = struct.unpack('<h', b'\x00\x01')
-    be_int, = struct.unpack('>h', b'\x00\x01')
+    (le_int,) = struct.unpack('<h', b'\x00\x01')
+    (be_int,) = struct.unpack('>h', b'\x00\x01')
     if sys.byteorder == 'little':
         native_int = le_int
         swapped_int = be_int
diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py
index ddfe68075c..edd20f9973 100644
--- a/nibabel/nicom/tests/test_utils.py
+++ b/nibabel/nicom/tests/test_utils.py
@@ -1,4 +1,4 @@
-""" Testing nicom.utils module
+"""Testing nicom.utils module
 """
 import re

@@ -6,7 +6,7 @@
 from .test_dicomwrappers import DATA, DATA_PHILIPS
 from ..utils import find_private_section

-pydicom, _, setup_module = optional_package("pydicom")
+pydicom, _, setup_module = optional_package('pydicom')


 def test_find_private_section_real():
diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py
index f1d5810775..48a010903a 100644
--- a/nibabel/nicom/utils.py
+++ b/nibabel/nicom/utils.py
@@ -1,11 +1,11 @@
-""" Utilities for working with DICOM datasets
+"""Utilities for working with DICOM datasets
 """

 from numpy.compat.py3k import asstr


 def find_private_section(dcm_data, group_no, creator):
-    """ Return start element in group `group_no` given creator name `creator`
+    """Return start element in group `group_no` given creator name `creator`

     Private attribute tags need to announce where they will go by putting a
     tag in the private group (here `group_no`) between elements 1 and 0xFF. The
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index a951522c8d..625fe6baa9 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -6,7 +6,7 @@
 # copyright and license terms.
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to NIfTI1 image format +"""Read / write access to NIfTI1 image format NIfTI1 format defined at http://nifti.nimh.nih.gov/nifti-1/ """ @@ -28,7 +28,7 @@ from .spm99analyze import SpmAnalyzeHeader from .casting import have_binary128 -pdcm, have_dicom, _ = optional_package("pydicom") +pdcm, have_dicom, _ = optional_package('pydicom') # nifti1 flat header definition for Analyze-like first 348 bytes # first number in comments indicates offset in file header in bytes @@ -75,7 +75,7 @@ ('srow_y', 'f4', (4,)), # 296; 2nd row affine transform ('srow_z', 'f4', (4,)), # 312; 3rd row affine transform ('intent_name', 'S16'), # 328; name or meaning of data - ('magic', 'S4') # 344; must be 'ni1\0' or 'n+1\0' + ('magic', 'S4'), # 344; must be 'ni1\0' or 'n+1\0' ] # Full header numpy dtype @@ -91,166 +91,191 @@ _complex256t = np.void _dtdefs = ( # code, label, dtype definition, niistring - (0, 'none', np.void, ""), - (1, 'binary', np.void, ""), - (2, 'uint8', np.uint8, "NIFTI_TYPE_UINT8"), - (4, 'int16', np.int16, "NIFTI_TYPE_INT16"), - (8, 'int32', np.int32, "NIFTI_TYPE_INT32"), - (16, 'float32', np.float32, "NIFTI_TYPE_FLOAT32"), - (32, 'complex64', np.complex64, "NIFTI_TYPE_COMPLEX64"), - (64, 'float64', np.float64, "NIFTI_TYPE_FLOAT64"), - (128, 'RGB', np.dtype([('R', 'u1'), - ('G', 'u1'), - ('B', 'u1')]), "NIFTI_TYPE_RGB24"), + (0, 'none', np.void, ''), + (1, 'binary', np.void, ''), + (2, 'uint8', np.uint8, 'NIFTI_TYPE_UINT8'), + (4, 'int16', np.int16, 'NIFTI_TYPE_INT16'), + (8, 'int32', np.int32, 'NIFTI_TYPE_INT32'), + (16, 'float32', np.float32, 'NIFTI_TYPE_FLOAT32'), + (32, 'complex64', np.complex64, 'NIFTI_TYPE_COMPLEX64'), + (64, 'float64', np.float64, 'NIFTI_TYPE_FLOAT64'), + (128, 'RGB', np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')]), 'NIFTI_TYPE_RGB24'), (255, 'all', np.void, ''), - (256, 'int8', np.int8, "NIFTI_TYPE_INT8"), - (512, 'uint16', np.uint16, "NIFTI_TYPE_UINT16"), - (768, 'uint32', np.uint32, "NIFTI_TYPE_UINT32"), - (1024, 'int64', np.int64, "NIFTI_TYPE_INT64"), - (1280, 'uint64', np.uint64, "NIFTI_TYPE_UINT64"), - (1536, 'float128', _float128t, "NIFTI_TYPE_FLOAT128"), - (1792, 'complex128', np.complex128, "NIFTI_TYPE_COMPLEX128"), - (2048, 'complex256', _complex256t, "NIFTI_TYPE_COMPLEX256"), - (2304, 'RGBA', np.dtype([('R', 'u1'), - ('G', 'u1'), - ('B', 'u1'), - ('A', 'u1')]), "NIFTI_TYPE_RGBA32"), + (256, 'int8', np.int8, 'NIFTI_TYPE_INT8'), + (512, 'uint16', np.uint16, 'NIFTI_TYPE_UINT16'), + (768, 'uint32', np.uint32, 'NIFTI_TYPE_UINT32'), + (1024, 'int64', np.int64, 'NIFTI_TYPE_INT64'), + (1280, 'uint64', np.uint64, 'NIFTI_TYPE_UINT64'), + (1536, 'float128', _float128t, 'NIFTI_TYPE_FLOAT128'), + (1792, 'complex128', np.complex128, 'NIFTI_TYPE_COMPLEX128'), + (2048, 'complex256', _complex256t, 'NIFTI_TYPE_COMPLEX256'), + ( + 2304, + 'RGBA', + np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1'), ('A', 'u1')]), + 'NIFTI_TYPE_RGBA32', + ), ) # Make full code alias bank, including dtype column data_type_codes = make_dt_codes(_dtdefs) # Transform (qform, sform) codes -xform_codes = Recoder(( # code, label, niistring - (0, 'unknown', "NIFTI_XFORM_UNKNOWN"), - (1, 'scanner', "NIFTI_XFORM_SCANNER_ANAT"), - (2, 'aligned', "NIFTI_XFORM_ALIGNED_ANAT"), - (3, 'talairach', "NIFTI_XFORM_TALAIRACH"), - (4, 'mni', "NIFTI_XFORM_MNI_152"), - (5, 'template', "NIFTI_XFORM_TEMPLATE_OTHER"), - ), fields=('code', 'label', 'niistring')) +xform_codes = Recoder( + ( # code, label, niistring + (0, 
'unknown', 'NIFTI_XFORM_UNKNOWN'), + (1, 'scanner', 'NIFTI_XFORM_SCANNER_ANAT'), + (2, 'aligned', 'NIFTI_XFORM_ALIGNED_ANAT'), + (3, 'talairach', 'NIFTI_XFORM_TALAIRACH'), + (4, 'mni', 'NIFTI_XFORM_MNI_152'), + (5, 'template', 'NIFTI_XFORM_TEMPLATE_OTHER'), + ), + fields=('code', 'label', 'niistring'), +) # unit codes -unit_codes = Recoder(( # code, label - (0, 'unknown'), - (1, 'meter'), - (2, 'mm'), - (3, 'micron'), - (8, 'sec'), - (16, 'msec'), - (24, 'usec'), - (32, 'hz'), - (40, 'ppm'), - (48, 'rads')), fields=('code', 'label')) - -slice_order_codes = Recoder(( # code, label - (0, 'unknown'), - (1, 'sequential increasing', 'seq inc'), - (2, 'sequential decreasing', 'seq dec'), - (3, 'alternating increasing', 'alt inc'), - (4, 'alternating decreasing', 'alt dec'), - (5, 'alternating increasing 2', 'alt inc 2'), - (6, 'alternating decreasing 2', 'alt dec 2')), fields=('code', 'label')) - -intent_codes = Recoder(( - # code, label, parameters description tuple - (0, 'none', (), "NIFTI_INTENT_NONE"), - (2, 'correlation', ('p1 = DOF',), "NIFTI_INTENT_CORREL"), - (3, 't test', ('p1 = DOF',), "NIFTI_INTENT_TTEST"), - (4, 'f test', ('p1 = numerator DOF', 'p2 = denominator DOF'), - "NIFTI_INTENT_FTEST"), - (5, 'z score', (), "NIFTI_INTENT_ZSCORE"), - (6, 'chi2', ('p1 = DOF',), "NIFTI_INTENT_CHISQ"), - # two parameter beta distribution - (7, 'beta', - ('p1=a', 'p2=b'), - "NIFTI_INTENT_BETA"), - # Prob(x) = (p1 choose x) * p2^x * (1-p2)^(p1-x), for x=0,1,...,p1 - (8, 'binomial', - ('p1 = number of trials', 'p2 = probability per trial'), - "NIFTI_INTENT_BINOM"), - # 2 parameter gamma - # Density(x) proportional to # x^(p1-1) * exp(-p2*x) - (9, 'gamma', - ('p1 = shape, p2 = scale', 2), - "NIFTI_INTENT_GAMMA"), - (10, 'poisson', - ('p1 = mean',), - "NIFTI_INTENT_POISSON"), - (11, 'normal', - ('p1 = mean', 'p2 = standard deviation',), - "NIFTI_INTENT_NORMAL"), - (12, 'non central f test', - ('p1 = numerator DOF', - 'p2 = denominator DOF', - 'p3 = numerator noncentrality parameter',), - "NIFTI_INTENT_FTEST_NONC"), - (13, 'non central chi2', - ('p1 = DOF', 'p2 = noncentrality parameter',), - "NIFTI_INTENT_CHISQ_NONC"), - (14, 'logistic', - ('p1 = location', 'p2 = scale',), - "NIFTI_INTENT_LOGISTIC"), - (15, 'laplace', - ('p1 = location', 'p2 = scale'), - "NIFTI_INTENT_LAPLACE"), - (16, 'uniform', - ('p1 = lower end', 'p2 = upper end'), - "NIFTI_INTENT_UNIFORM"), - (17, 'non central t test', - ('p1 = DOF', 'p2 = noncentrality parameter'), - "NIFTI_INTENT_TTEST_NONC"), - (18, 'weibull', - ('p1 = location', 'p2 = scale, p3 = power'), - "NIFTI_INTENT_WEIBULL"), - # p1 = 1 = 'half normal' distribution - # p1 = 2 = Rayleigh distribution - # p1 = 3 = Maxwell-Boltzmann distribution. 
- (19, 'chi', ('p1 = DOF',), "NIFTI_INTENT_CHI"), - (20, 'inverse gaussian', - ('pi = mu', 'p2 = lambda'), - "NIFTI_INTENT_INVGAUSS"), - (21, 'extreme value 1', - ('p1 = location', 'p2 = scale'), - "NIFTI_INTENT_EXTVAL"), - (22, 'p value', (), "NIFTI_INTENT_PVAL"), - (23, 'log p value', (), "NIFTI_INTENT_LOGPVAL"), - (24, 'log10 p value', (), "NIFTI_INTENT_LOG10PVAL"), - (1001, 'estimate', (), "NIFTI_INTENT_ESTIMATE"), - (1002, 'label', (), "NIFTI_INTENT_LABEL"), - (1003, 'neuroname', (), "NIFTI_INTENT_NEURONAME"), - (1004, 'general matrix', - ('p1 = M', 'p2 = N'), - "NIFTI_INTENT_GENMATRIX"), - (1005, 'symmetric matrix', ('p1 = M',), "NIFTI_INTENT_SYMMATRIX"), - (1006, 'displacement vector', (), "NIFTI_INTENT_DISPVECT"), - (1007, 'vector', (), "NIFTI_INTENT_VECTOR"), - (1008, 'pointset', (), "NIFTI_INTENT_POINTSET"), - (1009, 'triangle', (), "NIFTI_INTENT_TRIANGLE"), - (1010, 'quaternion', (), "NIFTI_INTENT_QUATERNION"), - (1011, 'dimensionless', (), "NIFTI_INTENT_DIMLESS"), - (2001, 'time series', - (), - "NIFTI_INTENT_TIME_SERIES", - "NIFTI_INTENT_TIMESERIES"), # this mis-spell occurs in the wild - (2002, 'node index', (), "NIFTI_INTENT_NODE_INDEX"), - (2003, 'rgb vector', (), "NIFTI_INTENT_RGB_VECTOR"), - (2004, 'rgba vector', (), "NIFTI_INTENT_RGBA_VECTOR"), - (2005, 'shape', (), "NIFTI_INTENT_SHAPE"), - # FSL-specific intent codes - codes used by FNIRT - # ($FSLDIR/warpfns/fnirt_file_reader.h:104) - (2006, 'fnirt disp field', (), 'FSL_FNIRT_DISPLACEMENT_FIELD'), - (2007, 'fnirt cubic spline coef', (), 'FSL_CUBIC_SPLINE_COEFFICIENTS'), - (2008, 'fnirt dct coef', (), 'FSL_DCT_COEFFICIENTS'), - (2009, 'fnirt quad spline coef', (), 'FSL_QUADRATIC_SPLINE_COEFFICIENTS'), - # FSL-specific intent codes - codes used by TOPUP - # ($FSLDIR/topup/topup_file_io.h:104) - (2016, 'topup cubic spline coef ', (), - 'FSL_TOPUP_CUBIC_SPLINE_COEFFICIENTS'), - (2017, 'topup quad spline coef', (), - 'FSL_TOPUP_QUADRATIC_SPLINE_COEFFICIENTS'), - (2018, 'topup field', (), 'FSL_TOPUP_FIELD'), -), fields=('code', 'label', 'parameters', 'niistring')) +unit_codes = Recoder( + ( # code, label + (0, 'unknown'), + (1, 'meter'), + (2, 'mm'), + (3, 'micron'), + (8, 'sec'), + (16, 'msec'), + (24, 'usec'), + (32, 'hz'), + (40, 'ppm'), + (48, 'rads'), + ), + fields=('code', 'label'), +) + +slice_order_codes = Recoder( + ( # code, label + (0, 'unknown'), + (1, 'sequential increasing', 'seq inc'), + (2, 'sequential decreasing', 'seq dec'), + (3, 'alternating increasing', 'alt inc'), + (4, 'alternating decreasing', 'alt dec'), + (5, 'alternating increasing 2', 'alt inc 2'), + (6, 'alternating decreasing 2', 'alt dec 2'), + ), + fields=('code', 'label'), +) + +intent_codes = Recoder( + ( + # code, label, parameters description tuple + (0, 'none', (), 'NIFTI_INTENT_NONE'), + (2, 'correlation', ('p1 = DOF',), 'NIFTI_INTENT_CORREL'), + (3, 't test', ('p1 = DOF',), 'NIFTI_INTENT_TTEST'), + (4, 'f test', ('p1 = numerator DOF', 'p2 = denominator DOF'), 'NIFTI_INTENT_FTEST'), + (5, 'z score', (), 'NIFTI_INTENT_ZSCORE'), + (6, 'chi2', ('p1 = DOF',), 'NIFTI_INTENT_CHISQ'), + # two parameter beta distribution + (7, 'beta', ('p1=a', 'p2=b'), 'NIFTI_INTENT_BETA'), + # Prob(x) = (p1 choose x) * p2^x * (1-p2)^(p1-x), for x=0,1,...,p1 + ( + 8, + 'binomial', + ('p1 = number of trials', 'p2 = probability per trial'), + 'NIFTI_INTENT_BINOM', + ), + # 2 parameter gamma + # Density(x) proportional to # x^(p1-1) * exp(-p2*x) + (9, 'gamma', ('p1 = shape, p2 = scale', 2), 'NIFTI_INTENT_GAMMA'), + (10, 'poisson', ('p1 = mean',), 
'NIFTI_INTENT_POISSON'), + ( + 11, + 'normal', + ( + 'p1 = mean', + 'p2 = standard deviation', + ), + 'NIFTI_INTENT_NORMAL', + ), + ( + 12, + 'non central f test', + ( + 'p1 = numerator DOF', + 'p2 = denominator DOF', + 'p3 = numerator noncentrality parameter', + ), + 'NIFTI_INTENT_FTEST_NONC', + ), + ( + 13, + 'non central chi2', + ( + 'p1 = DOF', + 'p2 = noncentrality parameter', + ), + 'NIFTI_INTENT_CHISQ_NONC', + ), + ( + 14, + 'logistic', + ( + 'p1 = location', + 'p2 = scale', + ), + 'NIFTI_INTENT_LOGISTIC', + ), + (15, 'laplace', ('p1 = location', 'p2 = scale'), 'NIFTI_INTENT_LAPLACE'), + (16, 'uniform', ('p1 = lower end', 'p2 = upper end'), 'NIFTI_INTENT_UNIFORM'), + ( + 17, + 'non central t test', + ('p1 = DOF', 'p2 = noncentrality parameter'), + 'NIFTI_INTENT_TTEST_NONC', + ), + (18, 'weibull', ('p1 = location', 'p2 = scale, p3 = power'), 'NIFTI_INTENT_WEIBULL'), + # p1 = 1 = 'half normal' distribution + # p1 = 2 = Rayleigh distribution + # p1 = 3 = Maxwell-Boltzmann distribution. + (19, 'chi', ('p1 = DOF',), 'NIFTI_INTENT_CHI'), + (20, 'inverse gaussian', ('pi = mu', 'p2 = lambda'), 'NIFTI_INTENT_INVGAUSS'), + (21, 'extreme value 1', ('p1 = location', 'p2 = scale'), 'NIFTI_INTENT_EXTVAL'), + (22, 'p value', (), 'NIFTI_INTENT_PVAL'), + (23, 'log p value', (), 'NIFTI_INTENT_LOGPVAL'), + (24, 'log10 p value', (), 'NIFTI_INTENT_LOG10PVAL'), + (1001, 'estimate', (), 'NIFTI_INTENT_ESTIMATE'), + (1002, 'label', (), 'NIFTI_INTENT_LABEL'), + (1003, 'neuroname', (), 'NIFTI_INTENT_NEURONAME'), + (1004, 'general matrix', ('p1 = M', 'p2 = N'), 'NIFTI_INTENT_GENMATRIX'), + (1005, 'symmetric matrix', ('p1 = M',), 'NIFTI_INTENT_SYMMATRIX'), + (1006, 'displacement vector', (), 'NIFTI_INTENT_DISPVECT'), + (1007, 'vector', (), 'NIFTI_INTENT_VECTOR'), + (1008, 'pointset', (), 'NIFTI_INTENT_POINTSET'), + (1009, 'triangle', (), 'NIFTI_INTENT_TRIANGLE'), + (1010, 'quaternion', (), 'NIFTI_INTENT_QUATERNION'), + (1011, 'dimensionless', (), 'NIFTI_INTENT_DIMLESS'), + ( + 2001, + 'time series', + (), + 'NIFTI_INTENT_TIME_SERIES', + 'NIFTI_INTENT_TIMESERIES', + ), # this mis-spell occurs in the wild + (2002, 'node index', (), 'NIFTI_INTENT_NODE_INDEX'), + (2003, 'rgb vector', (), 'NIFTI_INTENT_RGB_VECTOR'), + (2004, 'rgba vector', (), 'NIFTI_INTENT_RGBA_VECTOR'), + (2005, 'shape', (), 'NIFTI_INTENT_SHAPE'), + # FSL-specific intent codes - codes used by FNIRT + # ($FSLDIR/warpfns/fnirt_file_reader.h:104) + (2006, 'fnirt disp field', (), 'FSL_FNIRT_DISPLACEMENT_FIELD'), + (2007, 'fnirt cubic spline coef', (), 'FSL_CUBIC_SPLINE_COEFFICIENTS'), + (2008, 'fnirt dct coef', (), 'FSL_DCT_COEFFICIENTS'), + (2009, 'fnirt quad spline coef', (), 'FSL_QUADRATIC_SPLINE_COEFFICIENTS'), + # FSL-specific intent codes - codes used by TOPUP + # ($FSLDIR/topup/topup_file_io.h:104) + (2016, 'topup cubic spline coef ', (), 'FSL_TOPUP_CUBIC_SPLINE_COEFFICIENTS'), + (2017, 'topup quad spline coef', (), 'FSL_TOPUP_QUADRATIC_SPLINE_COEFFICIENTS'), + (2018, 'topup field', (), 'FSL_TOPUP_FIELD'), + ), + fields=('code', 'label', 'parameters', 'niistring'), +) class Nifti1Extension: @@ -331,8 +356,7 @@ def get_content(self): return self._content def get_sizeondisk(self): - """Return the size of the extension in the NIfTI file. 
- """ + """Return the size of the extension in the NIfTI file.""" # need raw value size plus 8 bytes for esize and ecode size = len(self._mangle(self._content)) size += 8 @@ -358,7 +382,7 @@ def __ne__(self, other): return not self == other def write_to(self, fileobj, byteswap): - """ Write header extensions to fileobj + """Write header extensions to fileobj Write starts at fileobj current file position. @@ -397,6 +421,7 @@ class Nifti1DicomExtension(Nifti1Extension): and content is the raw bytestring loaded directly from the nifti file header. """ + def __init__(self, code, content, parent_hdr=None): """ Parameters @@ -434,15 +459,16 @@ def __init__(self, code, content, parent_hdr=None): elif isinstance(content, bytes): # Got a byte string - unmangle it self._raw_content = content self._is_implicit_VR = self._guess_implicit_VR() - ds = self._unmangle(content, self._is_implicit_VR, - self._is_little_endian) + ds = self._unmangle(content, self._is_implicit_VR, self._is_little_endian) self._content = ds elif content is None: # initialize a new dicom dataset self._is_implicit_VR = False self._content = pdcm.dataset.Dataset() else: - raise TypeError(f"content must be either a bytestring or a pydicom Dataset. " - f"Got {content.__class__}") + raise TypeError( + f'content must be either a bytestring or a pydicom Dataset. ' + f'Got {content.__class__}' + ) def _guess_implicit_VR(self): """Try to guess DICOM syntax by checking for valid VRs. @@ -461,9 +487,7 @@ def _guess_implicit_VR(self): def _unmangle(self, value, is_implicit_VR=False, is_little_endian=True): bio = BytesIO(value) - ds = pdcm.filereader.read_dataset(bio, - is_implicit_VR, - is_little_endian) + ds = pdcm.filereader.read_dataset(bio, is_implicit_VR, is_little_endian) return ds def _mangle(self, dataset): @@ -480,22 +504,24 @@ def _mangle(self, dataset): # see nifti1_io.h for a complete list of all known extensions and # references to their description or contacts of the respective # initiators -extension_codes = Recoder(( - (0, "ignore", Nifti1Extension), - (2, "dicom", Nifti1DicomExtension if have_dicom else Nifti1Extension), - (4, "afni", Nifti1Extension), - (6, "comment", Nifti1Extension), - (8, "xcede", Nifti1Extension), - (10, "jimdiminfo", Nifti1Extension), - (12, "workflow_fwds", Nifti1Extension), - (14, "freesurfer", Nifti1Extension), - (16, "pypickle", Nifti1Extension), -), fields=('code', 'label', 'handler')) +extension_codes = Recoder( + ( + (0, 'ignore', Nifti1Extension), + (2, 'dicom', Nifti1DicomExtension if have_dicom else Nifti1Extension), + (4, 'afni', Nifti1Extension), + (6, 'comment', Nifti1Extension), + (8, 'xcede', Nifti1Extension), + (10, 'jimdiminfo', Nifti1Extension), + (12, 'workflow_fwds', Nifti1Extension), + (14, 'freesurfer', Nifti1Extension), + (16, 'pypickle', Nifti1Extension), + ), + fields=('code', 'label', 'handler'), +) class Nifti1Extensions(list): - """Simple extension collection, implemented as a list-subclass. - """ + """Simple extension collection, implemented as a list-subclass.""" def count(self, ecode): """Returns the number of extensions matching a given *ecode*. @@ -517,15 +543,14 @@ def get_codes(self): return [e.get_code() for e in self] def get_sizeondisk(self): - """Return the size of the complete header extensions in the NIfTI file. 
- """ + """Return the size of the complete header extensions in the NIfTI file.""" return np.sum([e.get_sizeondisk() for e in self]) def __repr__(self): - return "Nifti1Extensions(%s)" % ', '.join(str(e) for e in self) + return 'Nifti1Extensions(%s)' % ', '.join(str(e) for e in self) def write_to(self, fileobj, byteswap): - """ Write header extensions to fileobj + """Write header extensions to fileobj Write starts at fileobj current file position. @@ -588,7 +613,8 @@ def from_fileobj(klass, fileobj, size, byteswap): warnings.warn( 'Extension size is not a multiple of 16 bytes; ' 'Assuming size is correct and hoping for the best', - UserWarning) + UserWarning, + ) # read extension itself; esize includes the 8 bytes already read evalue = fileobj.read(int(esize - 8)) if not len(evalue) == esize - 8: @@ -610,7 +636,7 @@ def from_fileobj(klass, fileobj, size, byteswap): class Nifti1Header(SpmAnalyzeHeader): - """ Class for NIfTI1 header + """Class for NIfTI1 header The NIfTI1 header has many more coded fields than the simpler Analyze variants. NIfTI1 headers also have extensions. @@ -622,16 +648,19 @@ class Nifti1Header(SpmAnalyzeHeader): This class handles the header-preceding-data case. """ + # Copies of module level definitions template_dtype = header_dtype _data_type_codes = data_type_codes # fields with recoders for their values - _field_recoders = {'datatype': data_type_codes, - 'qform_code': xform_codes, - 'sform_code': xform_codes, - 'intent_code': intent_codes, - 'slice_code': slice_order_codes} + _field_recoders = { + 'datatype': data_type_codes, + 'qform_code': xform_codes, + 'sform_code': xform_codes, + 'intent_code': intent_codes, + 'slice_code': slice_order_codes, + } # data scaling capabilities has_data_slope = True @@ -655,28 +684,17 @@ class Nifti1Header(SpmAnalyzeHeader): # Quaternion threshold near 0, based on float32 precision quaternion_threshold = -np.finfo(np.float32).eps * 3 - def __init__(self, - binaryblock=None, - endianness=None, - check=True, - extensions=()): - """ Initialize header from binary data block and extensions - """ - super(Nifti1Header, self).__init__(binaryblock, - endianness, - check) + def __init__(self, binaryblock=None, endianness=None, check=True, extensions=()): + """Initialize header from binary data block and extensions""" + super(Nifti1Header, self).__init__(binaryblock, endianness, check) self.extensions = self.exts_klass(extensions) def copy(self): - """ Return copy of header + """Return copy of header Take reference to extensions as well as copy of header contents """ - return self.__class__( - self.binaryblock, - self.endianness, - False, - self.extensions) + return self.__class__(self.binaryblock, self.endianness, False, self.extensions) @classmethod def from_fileobj(klass, fileobj, endianness=None, check=True): @@ -696,21 +714,20 @@ def from_fileobj(klass, fileobj, endianness=None, check=True): else: # otherwise read until the beginning of the data extsize = hdr._structarr['vox_offset'] - fileobj.tell() byteswap = endian_codes['native'] != hdr.endianness - hdr.extensions = klass.exts_klass.from_fileobj(fileobj, extsize, - byteswap) + hdr.extensions = klass.exts_klass.from_fileobj(fileobj, extsize, byteswap) return hdr def write_to(self, fileobj): # First check that vox offset is large enough; set if necessary if self.is_single: vox_offset = self._structarr['vox_offset'] - min_vox_offset = (self.single_vox_offset + - self.extensions.get_sizeondisk()) + min_vox_offset = self.single_vox_offset + self.extensions.get_sizeondisk() if vox_offset 
== 0: # vox offset unset; set as necessary self._structarr['vox_offset'] = min_vox_offset elif vox_offset < min_vox_offset: raise HeaderDataError( - f'vox offset set to {vox_offset}, but need at least {min_vox_offset}') + f'vox offset set to {vox_offset}, but need at least {min_vox_offset}' + ) super(Nifti1Header, self).write_to(fileobj) # Write extensions if len(self.extensions) == 0: @@ -724,7 +741,7 @@ def write_to(self, fileobj): self.extensions.write_to(fileobj, byteswap) def get_best_affine(self): - """ Select best of available transforms """ + """Select best of available transforms""" hdr = self._structarr if hdr['sform_code'] != 0: return self.get_sform() @@ -734,7 +751,7 @@ def get_best_affine(self): @classmethod def default_structarr(klass, endianness=None): - """ Create empty header binary block with given endianness """ + """Create empty header binary block with given endianness""" hdr_data = super(Nifti1Header, klass).default_structarr(endianness) if klass.is_single: hdr_data['magic'] = klass.single_magic @@ -744,7 +761,7 @@ def default_structarr(klass, endianness=None): @classmethod def from_header(klass, header=None, check=True): - """ Class method to create header from another header + """Class method to create header from another header Extend Analyze header copy by copying extensions from other Nifti types. @@ -768,7 +785,7 @@ def from_header(klass, header=None, check=True): return new_hdr def get_data_shape(self): - """ Get shape of data + """Get shape of data Examples -------- @@ -797,8 +814,9 @@ def get_data_shape(self): if shape[:3] == (-1, 1, 1): vec_len = int(self._structarr['glmin']) if vec_len == 0: - raise HeaderDataError('-1 in dim[1] but 0 in glmin; ' - 'inconsistent freesurfer type header?') + raise HeaderDataError( + '-1 in dim[1] but 0 in glmin; ' 'inconsistent freesurfer type header?' + ) return (vec_len, 1, 1) + shape[3:] # Apply freesurfer hack for ico7 surface elif shape[:3] == (27307, 1, 6): @@ -807,7 +825,7 @@ def get_data_shape(self): return shape def set_data_shape(self, shape): - """ Set shape of data # noqa + """Set shape of data # noqa If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -864,8 +882,11 @@ def set_data_shape(self, shape): if shape[:3] == (163842, 1, 1): shape = (27307, 1, 6) + shape[3:] # Apply freesurfer hack for large vectors - elif (len(shape) >= 3 and shape[1:3] == (1, 1) and - shape[0] > np.iinfo(hdr['dim'].dtype.base).max): + elif ( + len(shape) >= 3 + and shape[1:3] == (1, 1) + and shape[0] > np.iinfo(hdr['dim'].dtype.base).max + ): try: hdr['glmin'] = shape[0] except OverflowError: @@ -874,13 +895,16 @@ def set_data_shape(self, shape): overflow = hdr['glmin'] != shape[0] if overflow: raise HeaderDataError(f'shape[0] {shape[0]} does not fit in glmax datatype') - warnings.warn('Using large vector Freesurfer hack; header will ' - 'not be compatible with SPM or FSL', stacklevel=2) + warnings.warn( + 'Using large vector Freesurfer hack; header will ' + 'not be compatible with SPM or FSL', + stacklevel=2, + ) shape = (-1, 1, 1) + shape[3:] super(Nifti1Header, self).set_data_shape(shape) def set_data_dtype(self, datatype): - """ Set numpy dtype for data from code or dtype or type + """Set numpy dtype for data from code or dtype or type Using :py:class:`int` or ``"int"`` is disallowed, as these types will be interpreted as ``np.int64``, which is almost never desired. 
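# Note: a minimal sketch (not part of the patch) of the behaviour the hunk
# above documents - sized integer dtypes pass through, while the ambiguous
# built-in ``int`` is rejected. Assumes a default-constructed Nifti1Header.
import numpy as np
from nibabel import Nifti1Header

hdr = Nifti1Header()
hdr.set_data_dtype(np.int16)  # explicit sized integer: accepted
hdr.set_data_dtype('uint8')   # string code: accepted
try:
    hdr.set_data_dtype(int)   # would silently mean np.int64: raises
except ValueError as err:
    print(err)                # "Invalid data type <class 'int'>. ..."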
@@ -921,13 +945,15 @@ def set_data_dtype(self, datatype): >>> hdr.get_data_dtype() == np.dtype('int64') True """ - if not isinstance(datatype, np.dtype) and datatype in (int, "int"): - raise ValueError(f"Invalid data type {datatype!r}. Specify a sized integer, " - "e.g., 'uint8' or numpy.int16.") + if not isinstance(datatype, np.dtype) and datatype in (int, 'int'): + raise ValueError( + f'Invalid data type {datatype!r}. Specify a sized integer, ' + "e.g., 'uint8' or numpy.int16." + ) super().set_data_dtype(datatype) def get_qform_quaternion(self): - """ Compute quaternion from b, c, d of quaternion + """Compute quaternion from b, c, d of quaternion Fills a value by assuming this is a unit quaternion """ @@ -937,7 +963,7 @@ def get_qform_quaternion(self): return fillpositive(bcd, self.quaternion_threshold) def get_qform(self, coded=False): - """ Return 4x4 affine matrix from qform parameters in header + """Return 4x4 affine matrix from qform parameters in header Parameters ---------- @@ -978,7 +1004,7 @@ def get_qform(self, coded=False): return out def set_qform(self, affine, code=None, strip_shears=True): - """ Set qform header values from 4x4 affine + """Set qform header values from 4x4 affine Parameters ---------- @@ -1069,8 +1095,7 @@ def set_qform(self, affine, code=None, strip_shears=True): P, S, Qs = npl.svd(R) PR = np.dot(P, Qs) if not strip_shears and not np.allclose(PR, R): - raise HeaderDataError("Shears in affine and `strip_shears` is " - "False") + raise HeaderDataError('Shears in affine and `strip_shears` is ' 'False') # Convert to quaternion quat = mat2quat(PR) # Set into header @@ -1080,7 +1105,7 @@ def set_qform(self, affine, code=None, strip_shears=True): hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d'] = quat[1:] def get_sform(self, coded=False): - """ Return 4x4 affine matrix from sform parameters in header + """Return 4x4 affine matrix from sform parameters in header Parameters ---------- @@ -1111,7 +1136,7 @@ def get_sform(self, coded=False): return out def set_sform(self, affine, code=None): - """ Set sform transform from 4x4 affine + """Set sform transform from 4x4 affine Parameters ---------- @@ -1173,7 +1198,7 @@ def set_sform(self, affine, code=None): hdr['srow_z'][:] = affine[2, :] def get_slope_inter(self): - """ Get data scaling (slope) and DC offset (intercept) from header data + """Get data scaling (slope) and DC offset (intercept) from header data Returns ------- @@ -1216,7 +1241,7 @@ def get_slope_inter(self): return slope, inter def set_slope_inter(self, slope, inter=None): - """ Set slope and / or intercept into header + """Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -1250,7 +1275,7 @@ def set_slope_inter(self, slope, inter=None): self._structarr['scl_inter'] = inter def get_dim_info(self): - """ Gets NIfTI MRI slice etc dimension information + """Gets NIfTI MRI slice etc dimension information Returns ------- @@ -1280,12 +1305,14 @@ def get_dim_info(self): freq = info & 3 phase = (info >> 2) & 3 slice = (info >> 4) & 3 - return (freq - 1 if freq else None, - phase - 1 if phase else None, - slice - 1 if slice else None) + return ( + freq - 1 if freq else None, + phase - 1 if phase else None, + slice - 1 if slice else None, + ) def set_dim_info(self, freq=None, phase=None, slice=None): - """ Sets nifti MRI slice etc dimension information + """Sets nifti MRI slice etc dimension information Parameters ---------- @@ -1332,7 +1359,7 
@@ def set_dim_info(self, freq=None, phase=None, slice=None): self._structarr['dim_info'] = info def get_intent(self, code_repr='label'): - """ Get intent code, parameters and name + """Get intent code, parameters and name Parameters ---------- @@ -1377,7 +1404,7 @@ def get_intent(self, code_repr='label'): return label, tuple(params), name def set_intent(self, code, params=(), name='', allow_unknown=False): - """ Set the intent code, parameters and name + """Set the intent code, parameters and name If parameters are not specified, assumed to be all zero. Each intent code has a set number of parameters associated. If you @@ -1444,12 +1471,12 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): hdr['intent_code'] = icode hdr['intent_name'] = name all_params = [0] * 3 - all_params[:len(params)] = params[:] + all_params[: len(params)] = params[:] for i, param in enumerate(all_params): hdr['intent_p%d' % (i + 1)] = param def get_slice_duration(self): - """ Get slice duration + """Get slice duration Returns ------- @@ -1471,12 +1498,11 @@ def get_slice_duration(self): """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension must be set ' - 'for duration to be valid') + raise HeaderDataError('Slice dimension must be set ' 'for duration to be valid') return float(self._structarr['slice_duration']) def set_slice_duration(self, duration): - """ Set slice duration + """Set slice duration Parameters ---------- @@ -1489,27 +1515,25 @@ def set_slice_duration(self, duration): """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension must be set ' - 'for duration to be valid') + raise HeaderDataError('Slice dimension must be set ' 'for duration to be valid') self._structarr['slice_duration'] = duration def get_n_slices(self): - """ Return the number of slices - """ + """Return the number of slices""" _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension not set in header ' - 'dim_info') + raise HeaderDataError('Slice dimension not set in header ' 'dim_info') shape = self.get_data_shape() try: slice_len = shape[slice_dim] except IndexError: - raise HeaderDataError(f'Slice dimension index ({slice_dim}) ' - f'outside shape tuple ({shape})') + raise HeaderDataError( + f'Slice dimension index ({slice_dim}) ' f'outside shape tuple ({shape})' + ) return slice_len def get_slice_times(self): - """ Get slice times from slice timing information + """Get slice times from slice timing information Returns ------- @@ -1537,10 +1561,8 @@ def get_slice_times(self): duration = self.get_slice_duration() slabel = self.get_value_label('slice_code') if slabel == 'unknown': - raise HeaderDataError('Cannot get slice times when ' - 'Slice code is "unknown"') - slice_start, slice_end = (int(hdr['slice_start']), - int(hdr['slice_end'])) + raise HeaderDataError('Cannot get slice times when ' 'Slice code is "unknown"') + slice_start, slice_end = (int(hdr['slice_start']), int(hdr['slice_end'])) if slice_start < 0: raise HeaderDataError('slice_start should be >= 0') if slice_end == 0: @@ -1550,12 +1572,10 @@ def get_slice_times(self): raise HeaderDataError('slice_end should be > slice_start') st_order = self._slice_time_order(slabel, n_timed) times = st_order * duration - return ((None,) * slice_start + - tuple(times) + - (None,) * (slice_len - slice_end - 1)) + return (None,) * slice_start + tuple(times) + (None,) * (slice_len - slice_end - 1) def set_slice_times(self, 
slice_times): - """ Set slice times into *hdr* + """Set slice times into *hdr* Parameters ---------- @@ -1582,8 +1602,7 @@ def set_slice_times(self, slice_times): hdr = self._structarr slice_len = self.get_n_slices() if slice_len != len(slice_times): - raise HeaderDataError('Number of slice times does not ' - 'match number of slices') + raise HeaderDataError('Number of slice times does not ' 'match number of slices') # Extract Nones at beginning and end. Check for others for ind, time in enumerate(slice_times): if time is not None: @@ -1595,17 +1614,15 @@ def set_slice_times(self, slice_times): if time is not None: slice_end = slice_len - ind - 1 break - timed = slice_times[slice_start:slice_end + 1] + timed = slice_times[slice_start : slice_end + 1] for time in timed: if time is None: - raise HeaderDataError('Cannot have None in middle ' - 'of slice time vector') + raise HeaderDataError('Cannot have None in middle ' 'of slice time vector') # Find slice duration, check times are compatible with single # duration tdiffs = np.diff(np.sort(timed)) if not np.allclose(np.diff(tdiffs), 0): - raise HeaderDataError('Slice times not compatible with ' - 'single slice duration') + raise HeaderDataError('Slice times not compatible with ' 'single slice duration') duration = np.mean(tdiffs) # To slice time order st_order = np.round(np.array(timed) / duration) @@ -1617,9 +1634,7 @@ def set_slice_times(self, slice_times): matching_labels = [] for label in labels: - if np.all(st_order == self._slice_time_order( - label, - n_timed)): + if np.all(st_order == self._slice_time_order(label, n_timed)): matching_labels.append(label) if not matching_labels: @@ -1627,7 +1642,8 @@ def set_slice_times(self, slice_times): if len(matching_labels) > 1: warnings.warn( f"Multiple slice orders satisfy: {', '.join(matching_labels)}. 
" - "Choosing the first one") + 'Choosing the first one' + ) label = matching_labels[0] # Set values into header hdr['slice_start'] = slice_start @@ -1636,23 +1652,23 @@ def set_slice_times(self, slice_times): hdr['slice_code'] = slice_order_codes.code[label] def _slice_time_order(self, slabel, n_slices): - """ Supporting function to give time order of slices from label """ + """Supporting function to give time order of slices from label""" if slabel == 'sequential increasing': sp_ind_time_order = list(range(n_slices)) elif slabel == 'sequential decreasing': sp_ind_time_order = list(range(n_slices)[::-1]) elif slabel == 'alternating increasing': - sp_ind_time_order = (list(range(0, n_slices, 2)) + - list(range(1, n_slices, 2))) + sp_ind_time_order = list(range(0, n_slices, 2)) + list(range(1, n_slices, 2)) elif slabel == 'alternating decreasing': - sp_ind_time_order = (list(range(n_slices - 1, -1, -2)) + - list(range(n_slices - 2, -1, -2))) + sp_ind_time_order = list(range(n_slices - 1, -1, -2)) + list( + range(n_slices - 2, -1, -2) + ) elif slabel == 'alternating increasing 2': - sp_ind_time_order = (list(range(1, n_slices, 2)) + - list(range(0, n_slices, 2))) + sp_ind_time_order = list(range(1, n_slices, 2)) + list(range(0, n_slices, 2)) elif slabel == 'alternating decreasing 2': - sp_ind_time_order = (list(range(n_slices - 2, -1, -2)) + - list(range(n_slices - 1, -1, -2))) + sp_ind_time_order = list(range(n_slices - 2, -1, -2)) + list( + range(n_slices - 1, -1, -2) + ) else: raise HeaderDataError(f'We do not handle slice ordering "{slabel}"') return np.argsort(sp_ind_time_order) @@ -1660,8 +1676,7 @@ def _slice_time_order(self, slabel, n_slices): def get_xyzt_units(self): xyz_code = self.structarr['xyzt_units'] % 8 t_code = self.structarr['xyzt_units'] - xyz_code - return (unit_codes.label[xyz_code], - unit_codes.label[t_code]) + return (unit_codes.label[xyz_code], unit_codes.label[t_code]) def set_xyzt_units(self, xyz=None, t=None): if xyz is None: @@ -1675,7 +1690,7 @@ def set_xyzt_units(self, xyz=None, t=None): self.structarr['xyzt_units'] = xyz_code + t_code def _clean_after_mapping(self): - """ Set format-specific stuff after converting header from mapping + """Set format-specific stuff after converting header from mapping Clean up header after it has been initialized from an ``as_analyze_map`` method of another header type @@ -1683,8 +1698,7 @@ def _clean_after_mapping(self): See :meth:`nibabel.analyze.AnalyzeHeader._clean_after_mapping` for a more detailed description. """ - self._structarr['magic'] = (self.single_magic if self.is_single - else self.pair_magic) + self._structarr['magic'] = self.single_magic if self.is_single else self.pair_magic """ Checks only below here """ @@ -1692,15 +1706,17 @@ def _clean_after_mapping(self): def _get_checks(klass): # We need to return our own versions of - e.g. 
chk_datatype, to # pick up the Nifti datatypes from our class - return (klass._chk_sizeof_hdr, - klass._chk_datatype, - klass._chk_bitpix, - klass._chk_pixdims, - klass._chk_qfac, - klass._chk_magic, - klass._chk_offset, - klass._chk_qform_code, - klass._chk_sform_code) + return ( + klass._chk_sizeof_hdr, + klass._chk_datatype, + klass._chk_bitpix, + klass._chk_pixdims, + klass._chk_qfac, + klass._chk_magic, + klass._chk_offset, + klass._chk_qform_code, + klass._chk_sform_code, + ) @staticmethod def _chk_qfac(hdr, fix=False): @@ -1736,8 +1752,7 @@ def _chk_offset(hdr, fix=False): return hdr, rep if magic == hdr.single_magic and offset < hdr.single_vox_offset: rep.problem_level = 40 - rep.problem_msg = ('vox offset %d too low for ' - 'single file nifti1' % offset) + rep.problem_msg = 'vox offset %d too low for ' 'single file nifti1' % offset if fix: hdr['vox_offset'] = hdr.single_vox_offset rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}' @@ -1780,20 +1795,22 @@ def may_contain_header(klass, binaryblock): if len(binaryblock) < klass.sizeof_hdr: return False - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, - buffer=binaryblock[:klass.sizeof_hdr]) + hdr_struct = np.ndarray( + shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr] + ) return hdr_struct['magic'] in (b'ni1', b'n+1') class Nifti1PairHeader(Nifti1Header): - """ Class for NIfTI1 pair header """ + """Class for NIfTI1 pair header""" + # Signal whether this is single (header + data) file is_single = False class Nifti1Pair(analyze.AnalyzeImage): - """ Class for NIfTI1 format image, header pair - """ + """Class for NIfTI1 format image, header pair""" + header_class = Nifti1PairHeader _meta_sniff_len = header_class.sizeof_hdr rw = True @@ -1802,8 +1819,7 @@ class Nifti1Pair(analyze.AnalyzeImage): # the data at serialization time _dtype_alias = None - def __init__(self, dataobj, affine, header=None, - extra=None, file_map=None, dtype=None): + def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None): # Special carve-out for 64 bit integers # See GitHub issues # * https://github.com/nipy/nibabel/issues/1046 @@ -1811,24 +1827,24 @@ def __init__(self, dataobj, affine, header=None, # This only applies to NIfTI because the parent Analyze formats did # not support 64-bit integer data, so `set_data_dtype(int64)` would # already fail. - danger_dts = (np.dtype("int64"), np.dtype("uint64")) + danger_dts = (np.dtype('int64'), np.dtype('uint64')) if header is None and dtype is None and get_obj_dtype(dataobj) in danger_dts: - msg = (f"Image data has type {dataobj.dtype}, which may cause " - "incompatibilities with other tools. This will error in " - "NiBabel 5.0. This warning can be silenced " - f"by passing the dtype argument to {self.__class__.__name__}().") + msg = ( + f'Image data has type {dataobj.dtype}, which may cause ' + 'incompatibilities with other tools. This will error in ' + 'NiBabel 5.0. This warning can be silenced ' + f'by passing the dtype argument to {self.__class__.__name__}().' 
+            )
             warnings.warn(msg, FutureWarning, stacklevel=2)
-        super(Nifti1Pair, self).__init__(dataobj,
-                                         affine,
-                                         header,
-                                         extra,
-                                         file_map,
-                                         dtype)
+        super(Nifti1Pair, self).__init__(dataobj, affine, header, extra, file_map, dtype)
         # Force set of s/q form when header is None unless affine is also None
         if header is None and affine is not None:
             self._affine2header()
+
     # Copy docstring
-    __init__.__doc__ = analyze.AnalyzeImage.__init__.__doc__ + """
+    __init__.__doc__ = (
+        analyze.AnalyzeImage.__init__.__doc__
+        + """
     Notes
     -----

@@ -1841,9 +1857,10 @@ def __init__(self, dataobj, affine, header=None,
     :meth:`set_qform` methods can be used to update the codes after an image
     has been created - see those methods, and the
     :ref:`manual <default-sform-qform-codes>` for more details.
     """
+    )

     def update_header(self):
-        """ Harmonize header with image data and affine
+        """Harmonize header with image data and affine

         See AnalyzeImage.update_header for more examples

@@ -1863,7 +1880,7 @@ def update_header(self):
             hdr['magic'] = hdr.pair_magic

     def _affine2header(self):
-        """ Unconditionally set affine into the header """
+        """Unconditionally set affine into the header"""
         hdr = self._header
         # Set affine into sform with default code
         hdr.set_sform(self._affine, code='aligned')
@@ -1871,7 +1888,7 @@ def _affine2header(self):
         hdr.set_qform(self._affine, code='unknown')

     def get_qform(self, coded=False):
-        """ Return 4x4 affine matrix from qform parameters in header
+        """Return 4x4 affine matrix from qform parameters in header

         Parameters
         ----------
@@ -1897,7 +1914,7 @@ def get_qform(self, coded=False):
         return self._header.get_qform(coded)

     def set_qform(self, affine, code=None, strip_shears=True, **kwargs):
-        """ Set qform header values from 4x4 affine
+        """Set qform header values from 4x4 affine

         Parameters
         ----------
@@ -1958,7 +1975,7 @@ def set_qform(self, affine, code=None, strip_shears=True, **kwargs):
         self._affine[:] = self._header.get_best_affine()

     def get_sform(self, coded=False):
-        """ Return 4x4 affine matrix from sform parameters in header
+        """Return 4x4 affine matrix from sform parameters in header

         Parameters
         ----------
@@ -1984,7 +2001,7 @@ def get_sform(self, coded=False):
         return self._header.get_sform(coded)

     def set_sform(self, affine, code=None, **kwargs):
-        """ Set sform transform from 4x4 affine
+        """Set sform transform from 4x4 affine

         Parameters
         ----------
@@ -2047,7 +2064,7 @@ def set_sform(self, affine, code=None, **kwargs):
         self._affine[:] = self._header.get_best_affine()

     def set_data_dtype(self, datatype):
-        """ Set numpy dtype for data from code, dtype, type or alias
+        """Set numpy dtype for data from code, dtype, type or alias

         Using :py:class:`int` or ``"int"`` is disallowed, as these types will be
         interpreted as ``np.int64``, which is almost never desired.
@@ -2147,7 +2164,7 @@ def set_data_dtype(self, datatype):
         super().set_data_dtype(datatype)

     def get_data_dtype(self, finalize=False):
-        """ Get numpy dtype for data
+        """Get numpy dtype for data

         If ``set_data_dtype()`` has been called with an alias and ``finalize``
         is ``False``, return the alias.
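# Note: a minimal sketch (not part of the patch) of the alias behaviour the
# two methods above describe - the alias is stored as-is and only resolved
# against the actual array values when finalized (as at save time). The
# int16 input data and eye(4) affine are illustrative assumptions.
import numpy as np
from nibabel import Nifti1Image

data = np.arange(24, dtype=np.int16).reshape(2, 3, 4)
img = Nifti1Image(data, np.eye(4))
img.set_data_dtype('smallest')            # alias recorded, not yet resolved
print(img.get_data_dtype())               # -> 'smallest'
print(img.get_data_dtype(finalize=True))  # -> uint8 here, since 0..23 fits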
@@ -2163,22 +2180,24 @@ def get_data_dtype(self, finalize=False): datatype = None if self._dtype_alias == 'compat': datatype = _get_analyze_compat_dtype(self._dataobj) - descrip = "an Analyze-compatible dtype" + descrip = 'an Analyze-compatible dtype' elif self._dtype_alias == 'smallest': datatype = _get_smallest_dtype(self._dataobj) - descrip = "an integer type with fewer than 64 bits" + descrip = 'an integer type with fewer than 64 bits' else: - raise ValueError(f"Unknown dtype alias {self._dtype_alias}.") + raise ValueError(f'Unknown dtype alias {self._dtype_alias}.') if datatype is None: dt = get_obj_dtype(self._dataobj) - raise ValueError(f"Cannot automatically cast array (of type {dt}) to {descrip}." - " Please set_data_dtype() to an explicit data type.") + raise ValueError( + f'Cannot automatically cast array (of type {dt}) to {descrip}.' + ' Please set_data_dtype() to an explicit data type.' + ) self.set_data_dtype(datatype) # Clears the alias return super().get_data_dtype() def to_file_map(self, file_map=None, dtype=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Parameters ---------- @@ -2219,7 +2238,8 @@ def as_reoriented(self, ornt): # Also apply the transform to the dim_info fields new_dim = [ None if orig_dim is None else int(ornt[orig_dim, 0]) - for orig_dim in img.header.get_dim_info()] + for orig_dim in img.header.get_dim_info() + ] img.header.set_dim_info(*new_dim) @@ -2227,15 +2247,15 @@ def as_reoriented(self, ornt): class Nifti1Image(Nifti1Pair, SerializableImage): - """ Class for single file NIfTI1 format image - """ + """Class for single file NIfTI1 format image""" + header_class = Nifti1Header valid_exts = ('.nii',) files_types = (('image', '.nii'),) @staticmethod def _get_fileholders(file_map): - """ Return fileholder for header and image + """Return fileholder for header and image For single-file niftis, the fileholder for the header and the image will be the same @@ -2243,14 +2263,14 @@ def _get_fileholders(file_map): return file_map['image'], file_map['image'] def update_header(self): - """ Harmonize header with image data and affine """ + """Harmonize header with image data and affine""" super(Nifti1Image, self).update_header() hdr = self._header hdr['magic'] = hdr.single_magic def load(filename): - """ Load NIfTI1 single or pair from `filename` + """Load NIfTI1 single or pair from `filename` Parameters ---------- @@ -2277,7 +2297,7 @@ def load(filename): def save(img, filename): - """ Save NIfTI1 single or pair to `filename` + """Save NIfTI1 single or pair to `filename` Parameters ---------- @@ -2291,11 +2311,11 @@ def save(img, filename): def _get_smallest_dtype( - arr, - itypes=(np.uint8, np.int16, np.int32), - ftypes=(), - ): - """ Return the smallest "sensible" dtype that will hold the array data + arr, + itypes=(np.uint8, np.int16, np.int32), + ftypes=(), +): + """Return the smallest "sensible" dtype that will hold the array data The purpose of this function is to support automatic type selection for serialization, so "sensible" here means well-supported in the NIfTI-1 world. 
@@ -2351,7 +2371,7 @@ def _get_smallest_dtype( def _get_analyze_compat_dtype(arr): - """ Return an Analyze-compatible dtype that ``arr`` can be safely cast to + """Return an Analyze-compatible dtype that ``arr`` can be safely cast to Analyze-compatible types are returned without inspection: @@ -2424,5 +2444,5 @@ def _get_analyze_compat_dtype(arr): return np.dtype('float32') raise ValueError( - f"Cannot find analyze-compatible dtype for array with dtype={dtype} (min={mn}, max={mx})" + f'Cannot find analyze-compatible dtype for array with dtype={dtype} (min={mn}, max={mx})' ) diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 10e789d076..9e8e597772 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to NIfTI2 image format +"""Read / write access to NIfTI2 image format Format described here: @@ -120,12 +120,13 @@ class Nifti2Header(Nifti1Header): - """ Class for NIfTI2 header + """Class for NIfTI2 header NIfTI2 is a slightly simplified variant of NIfTI1 which replaces 32-bit floats with 64-bit floats, and increases some integer widths to 32 or 64 bits. """ + template_dtype = header_dtype pair_vox_offset = 0 single_vox_offset = 544 @@ -141,7 +142,7 @@ class Nifti2Header(Nifti1Header): quaternion_threshold = -np.finfo(np.float64).eps * 3 def get_data_shape(self): - """ Get shape of data + """Get shape of data Examples -------- @@ -165,7 +166,7 @@ def get_data_shape(self): return AnalyzeHeader.get_data_shape(self) def set_data_shape(self, shape): - """ Set shape of data + """Set shape of data If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -184,7 +185,7 @@ def set_data_shape(self, shape): @classmethod def default_structarr(klass, endianness=None): - """ Create empty header binary block with given endianness """ + """Create empty header binary block with given endianness""" hdr_data = super(Nifti2Header, klass).default_structarr(endianness) hdr_data['eol_check'] = (13, 10, 26, 10) return hdr_data @@ -194,8 +195,7 @@ def default_structarr(klass, endianness=None): @classmethod def _get_checks(klass): # Add our own checks - return (super(Nifti2Header, klass)._get_checks() + - (klass._chk_eol_check,)) + return super(Nifti2Header, klass)._get_checks() + (klass._chk_eol_check,) @staticmethod def _chk_eol_check(hdr, fix=False): @@ -210,8 +210,9 @@ def _chk_eol_check(hdr, fix=False): rep.fix_msg = 'setting EOL check to 13, 10, 26, 10' return hdr, rep rep.problem_level = 40 - rep.problem_msg = ('EOL check not 0 or 13, 10, 26, 10; data may be ' - 'corrupted by EOL conversion') + rep.problem_msg = ( + 'EOL check not 0 or 13, 10, 26, 10; data may be ' 'corrupted by EOL conversion' + ) if fix: hdr['eol_check'] = (13, 10, 26, 10) rep.fix_msg = 'setting EOL check to 13, 10, 26, 10' @@ -222,34 +223,36 @@ def may_contain_header(klass, binaryblock): if len(binaryblock) < klass.sizeof_hdr: return False - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, - buffer=binaryblock[:klass.sizeof_hdr]) + hdr_struct = np.ndarray( + shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr] + ) bs_hdr_struct = hdr_struct.byteswap() return 540 in (hdr_struct['sizeof_hdr'], bs_hdr_struct['sizeof_hdr']) class Nifti2PairHeader(Nifti2Header): - """ Class for NIfTI2 pair header """ + """Class for NIfTI2 pair header""" + # Signal whether this is single (header + data) file is_single = False class 
Nifti2Pair(Nifti1Pair): - """ Class for NIfTI2 format image, header pair - """ + """Class for NIfTI2 format image, header pair""" + header_class = Nifti2PairHeader _meta_sniff_len = header_class.sizeof_hdr class Nifti2Image(Nifti1Image): - """ Class for single file NIfTI2 format image - """ + """Class for single file NIfTI2 format image""" + header_class = Nifti2Header _meta_sniff_len = header_class.sizeof_hdr def load(filename): - """ Load NIfTI2 single or pair image from `filename` + """Load NIfTI2 single or pair image from `filename` Parameters ---------- @@ -276,7 +279,7 @@ def load(filename): def save(img, filename): - """ Save NIfTI2 single or pair to `filename` + """Save NIfTI2 single or pair to `filename` Parameters ---------- diff --git a/nibabel/onetime.py b/nibabel/onetime.py index 6b8debc51b..8156b1a403 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -179,5 +179,7 @@ def auto_attr(func): # For backwards compatibility setattr_on_read = deprecate_with_version( - message="setattr_on_read has been renamed to auto_attr. Please use nibabel.onetime.auto_attr", - since="3.2", until="5.0")(auto_attr) + message='setattr_on_read has been renamed to auto_attr. Please use nibabel.onetime.auto_attr', + since='3.2', + until='5.0', +)(auto_attr) diff --git a/nibabel/openers.py b/nibabel/openers.py index b50da10c59..6338711cd7 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Context manager openers for various fileobject types +"""Context manager openers for various fileobject types """ from bz2 import BZ2File @@ -20,6 +20,7 @@ # is indexed_gzip present and modern? try: import indexed_gzip as igzip + version = igzip.__version__ HAVE_INDEXED_GZIP = True @@ -43,11 +44,12 @@ class DeterministicGzipFile(gzip.GzipFile): - """ Deterministic variant of GzipFile + """Deterministic variant of GzipFile This writer does not add filename information to the header, and defaults to a modification time (``mtime``) of 0 seconds. 
""" + def __init__(self, filename=None, mode=None, compresslevel=9, fileobj=None, mtime=0): # These two guards are copied from # https://github.com/python/cpython/blob/6ab65c6/Lib/gzip.py#L171-L174 @@ -55,8 +57,9 @@ def __init__(self, filename=None, mode=None, compresslevel=9, fileobj=None, mtim mode += 'b' if fileobj is None: fileobj = self.myfileobj = open(filename, mode or 'rb') - return super().__init__(filename="", mode=mode, compresslevel=compresslevel, - fileobj=fileobj, mtime=mtime) + return super().__init__( + filename='', mode=mode, compresslevel=compresslevel, fileobj=fileobj, mtime=mtime + ) def _gzip_open(filename, mode='rb', compresslevel=9, mtime=0, keep_open=False): @@ -74,14 +77,13 @@ def _gzip_open(filename, mode='rb', compresslevel=9, mtime=0, keep_open=False): return gzip_file -def _zstd_open(filename, mode="r", *, level_or_option=None, zstd_dict=None): - pyzstd = optional_package("pyzstd")[0] - return pyzstd.ZstdFile(filename, mode, - level_or_option=level_or_option, zstd_dict=zstd_dict) +def _zstd_open(filename, mode='r', *, level_or_option=None, zstd_dict=None): + pyzstd = optional_package('pyzstd')[0] + return pyzstd.ZstdFile(filename, mode, level_or_option=level_or_option, zstd_dict=zstd_dict) class Opener: - r""" Class to accept, maybe open, and context-manage file-likes / filenames + r"""Class to accept, maybe open, and context-manage file-likes / filenames Provides context manager to close files that the constructor opened for you. @@ -107,15 +109,18 @@ class Opener: '.gz': gz_def, '.bz2': bz2_def, '.zst': zstd_def, - None: (open, ('mode', 'buffering')) # default + None: (open, ('mode', 'buffering')), # default } #: default compression level when writing gz and bz2 files default_compresslevel = 1 #: default option for zst files default_zst_compresslevel = 3 - default_level_or_option = {"rb": None, "r": None, - "wb": default_zst_compresslevel, - "w": default_zst_compresslevel} + default_level_or_option = { + 'rb': None, + 'r': None, + 'wb': default_zst_compresslevel, + 'w': default_zst_compresslevel, + } #: whether to ignore case looking for compression extensions compress_ext_icase = True @@ -165,8 +170,7 @@ def _get_opener_argnames(self, fileish): return self.compress_ext_map[None] def _is_fileobj(self, obj): - """ Is `obj` a file-like object? - """ + """Is `obj` a file-like object?""" return hasattr(obj, 'read') and hasattr(obj, 'write') @property @@ -175,7 +179,7 @@ def closed(self): @property def name(self): - """ Return ``self.fobj.name`` or self._name if not present + """Return ``self.fobj.name`` or self._name if not present self._name will be None if object was created with a fileobj, otherwise it will be the filename. @@ -211,8 +215,7 @@ def __iter__(self): return iter(self.fobj) def close_if_mine(self): - """ Close ``self.fobj`` iff we opened it in the constructor - """ + """Close ``self.fobj`` iff we opened it in the constructor""" if self.me_opened: self.close() @@ -224,7 +227,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class ImageOpener(Opener): - """ Opener-type class to collect extra compressed extensions + """Opener-type class to collect extra compressed extensions A trivial sub-class of opener to which image classes can add extra extensions with custom openers, such as compressed openers. @@ -241,5 +244,6 @@ class ImageOpener(Opener): that `function` accepts. These arguments must be any (unordered) subset of `mode`, `compresslevel`, and `buffering`. 
""" + # Add new extensions to this dictionary compress_ext_map = Opener.compress_ext_map.copy() diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index 348cf1b995..090a73c366 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -1,4 +1,4 @@ -""" Routines to support optional packages """ +"""Routines to support optional packages""" from packaging.version import Version from .tripwire import TripWire @@ -14,7 +14,7 @@ def _check_pkg_version(pkg, min_version): def optional_package(name, trip_msg=None, min_version=None): - """ Return package-like thing and module setup for package `name` + """Return package-like thing and module setup for package `name` Parameters ---------- @@ -103,12 +103,14 @@ def optional_package(name, trip_msg=None, min_version=None): else: trip_msg = f'These functions need {name} version >= {min_version}' if trip_msg is None: - trip_msg = (f'We need package {name} for these functions, ' - f'but ``import {name}`` raised {exc}') + trip_msg = ( + f'We need package {name} for these functions, ' f'but ``import {name}`` raised {exc}' + ) pkg = TripWire(trip_msg) def setup_module(): import unittest + raise unittest.SkipTest(f'No {name} for these tests') return pkg, False, setup_module diff --git a/nibabel/orientations.py b/nibabel/orientations.py index fab106cab5..0adf19ca78 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Utilities for calculating and applying affine orientations """ +"""Utilities for calculating and applying affine orientations""" import numpy as np @@ -20,7 +20,7 @@ class OrientationError(Exception): def io_orientation(affine, tol=None): - """ Orientation of input axes in terms of output axes for `affine` + """Orientation of input axes in terms of output axes for `affine` Valid for an affine transformation from ``p`` dimensions to ``q`` dimensions (``affine.shape == (q + 1, p + 1)``). @@ -66,7 +66,7 @@ def io_orientation(affine, tol=None): # Threshold the singular values to determine the rank. 
if tol is None: tol = S.max() * max(RS.shape) * np.finfo(S.dtype).eps - keep = (S > tol) + keep = S > tol R = np.dot(P[:, keep], Qs[keep]) # the matrix R is such that np.dot(R,R.T) is projection onto the # columns of P[:,keep] and np.dot(R.T,R) is projection onto the rows @@ -111,9 +111,9 @@ def ornt_transform(start_ornt, end_ornt): start_ornt = np.asarray(start_ornt) end_ornt = np.asarray(end_ornt) if start_ornt.shape != end_ornt.shape: - raise ValueError("The orientations must have the same shape") + raise ValueError('The orientations must have the same shape') if start_ornt.shape[1] != 2: - raise ValueError(f"Invalid shape for an orientation: {start_ornt.shape}") + raise ValueError(f'Invalid shape for an orientation: {start_ornt.shape}') result = np.empty_like(start_ornt) for end_in_idx, (end_out_idx, end_flip) in enumerate(end_ornt): for start_in_idx, (start_out_idx, start_flip) in enumerate(start_ornt): @@ -125,13 +125,12 @@ def ornt_transform(start_ornt, end_ornt): result[start_in_idx, :] = [end_in_idx, flip] break else: - raise ValueError("Unable to find out axis %d in start_ornt" % - end_out_idx) + raise ValueError('Unable to find out axis %d in start_ornt' % end_out_idx) return result def apply_orientation(arr, ornt): - """ Apply transformations implied by `ornt` to the first + """Apply transformations implied by `ornt` to the first n axes of the array `arr` Parameters @@ -155,12 +154,10 @@ def apply_orientation(arr, ornt): ornt = np.asarray(ornt) n = ornt.shape[0] if t_arr.ndim < n: - raise OrientationError('Data array has fewer dimensions than ' - 'orientation') + raise OrientationError('Data array has fewer dimensions than ' 'orientation') # no coordinates can be dropped for applying the orientations if np.any(np.isnan(ornt[:, 0])): - raise OrientationError('Cannot drop coordinates when ' - 'applying orientation to data') + raise OrientationError('Cannot drop coordinates when ' 'applying orientation to data') # apply ornt transformations for ax, flip in enumerate(ornt[:, 1]): if flip == -1: @@ -173,7 +170,7 @@ def apply_orientation(arr, ornt): def inv_ornt_aff(ornt, shape): - """ Affine transform reversing transforms implied in `ornt` + """Affine transform reversing transforms implied in `ornt` Imagine you have an array ``arr`` of shape `shape`, and you apply the transforms implied by `ornt` (more below), to get ``tarr``. @@ -211,7 +208,7 @@ def inv_ornt_aff(ornt, shape): """ ornt = np.asarray(ornt) if np.any(np.isnan(ornt)): - raise OrientationError("We cannot invert orientation transform") + raise OrientationError('We cannot invert orientation transform') p = ornt.shape[0] shape = np.array(shape)[:p] # ornt implies a flip, followed by a transpose. We need the affine @@ -228,12 +225,9 @@ def inv_ornt_aff(ornt, shape): return np.dot(undo_flip, undo_reorder) -@deprecate_with_version('flip_axis is deprecated. ' - 'Please use numpy.flip instead.', - '3.2', - '5.0') +@deprecate_with_version('flip_axis is deprecated. ' 'Please use numpy.flip instead.', '3.2', '5.0') def flip_axis(arr, axis=0): - """ Flip contents of `axis` in array `arr` + """Flip contents of `axis` in array `arr` Equivalent to ``np.flip(arr, axis)``. 
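The deprecation above points to ``np.flip``; in context, the orientation utilities being reformatted here compose as in this short sketch (the affine is an arbitrary example):

    import numpy as np
    import nibabel as nib

    aff = np.diag([-2.0, 2.0, 2.0, 1.0])  # 2 mm voxels, first axis flipped
    print(nib.aff2axcodes(aff))           # ('L', 'A', 'S')
    ornt = nib.io_orientation(aff)        # [[0, -1], [1, 1], [2, 1]]
    data = np.arange(24).reshape(2, 3, 4)
    # For this ornt, apply_orientation reduces to a flip of axis 0
    assert np.array_equal(nib.apply_orientation(data, ornt),
                          np.flip(data, axis=0))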
@@ -252,7 +246,7 @@ def flip_axis(arr, axis=0): def ornt2axcodes(ornt, labels=None): - """ Convert orientation `ornt` to labels for axis directions + """Convert orientation `ornt` to labels for axis directions Parameters ---------- @@ -299,7 +293,7 @@ def ornt2axcodes(ornt, labels=None): def axcodes2ornt(axcodes, labels=None): - """ Convert axis codes `axcodes` to an orientation + """Convert axis codes `axcodes` to an orientation Parameters ---------- @@ -346,7 +340,7 @@ def axcodes2ornt(axcodes, labels=None): def aff2axcodes(aff, labels=None, tol=None): - """ axis direction codes for affine `aff` + """axis direction codes for affine `aff` Parameters ---------- diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 304c0c2cc0..c7d7a55617 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -139,27 +139,20 @@ from .openers import ImageOpener # PSL to RAS affine -PSL_TO_RAS = np.array([[0, 0, -1, 0], # L -> R - [-1, 0, 0, 0], # P -> A - [0, 1, 0, 0], # S -> S - [0, 0, 0, 1]]) +PSL_TO_RAS = np.array( + [[0, 0, -1, 0], [-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]] # L -> R # P -> A # S -> S +) # Acquisition (tra/sag/cor) to PSL axes # These come from looking at transverse, sagittal, coronal datasets where we # can see the LR, PA, SI orientation of the slice axes from the scanned object ACQ_TO_PSL = dict( - transverse=np.array([[0, 1, 0, 0], # P - [0, 0, 1, 0], # S - [1, 0, 0, 0], # L - [0, 0, 0, 1]]), + transverse=np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]), # P # S # L sagittal=np.diag([1, -1, -1, 1]), - coronal=np.array([[0, 0, 1, 0], # P - [0, -1, 0, 0], # S - [1, 0, 0, 0], # L - [0, 0, 0, 1]]) + coronal=np.array([[0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]), # P # S # L ) -DEG2RAD = np.pi / 180. +DEG2RAD = np.pi / 180.0 # General information dict definitions # assign props to PAR header entries @@ -218,46 +211,110 @@ image_def_dtds = {} image_def_dtds['V4'] = [ ('slice number', int), - ('echo number', int,), - ('dynamic scan number', int,), - ('cardiac phase number', int,), - ('image_type_mr', int,), - ('scanning sequence', int,), - ('index in REC file', int,), - ('image pixel size', int,), - ('scan percentage', int,), + ( + 'echo number', + int, + ), + ( + 'dynamic scan number', + int, + ), + ( + 'cardiac phase number', + int, + ), + ( + 'image_type_mr', + int, + ), + ( + 'scanning sequence', + int, + ), + ( + 'index in REC file', + int, + ), + ( + 'image pixel size', + int, + ), + ( + 'scan percentage', + int, + ), ('recon resolution', int, (2,)), ('rescale intercept', float), ('rescale slope', float), ('scale slope', float), # Window center, width recorded as integer but can be float - ('window center', float,), - ('window width', float,), + ( + 'window center', + float, + ), + ( + 'window width', + float, + ), ('image angulation', float, (3,)), ('image offcentre', float, (3,)), ('slice thickness', float), ('slice gap', float), - ('image_display_orientation', int,), - ('slice orientation', int,), - ('fmri_status_indication', int,), - ('image_type_ed_es', int,), + ( + 'image_display_orientation', + int, + ), + ( + 'slice orientation', + int, + ), + ( + 'fmri_status_indication', + int, + ), + ( + 'image_type_ed_es', + int, + ), ('pixel spacing', float, (2,)), ('echo_time', float), ('dyn_scan_begin_time', float), ('trigger_time', float), ('diffusion_b_factor', float), - ('number of averages', int,), + ( + 'number of averages', + int, + ), ('image_flip_angle', float), - ('cardiac frequency', int,), - ('minimum RR-interval', int,), - ('maximum 
RR-interval', int,), - ('TURBO factor', int,), - ('Inversion delay', float)] + ( + 'cardiac frequency', + int, + ), + ( + 'minimum RR-interval', + int, + ), + ( + 'maximum RR-interval', + int, + ), + ( + 'TURBO factor', + int, + ), + ('Inversion delay', float), +] # Extra image def fields for 4.1 compared to 4 image_def_dtds['V4.1'] = image_def_dtds['V4'] + [ - ('diffusion b value number', int,), # (imagekey!) - ('gradient orientation number', int,), # (imagekey!) + ( + 'diffusion b value number', + int, + ), # (imagekey!) + ( + 'gradient orientation number', + int, + ), # (imagekey!) ('contrast type', 'S30'), # XXX might be too short? ('diffusion anisotropy type', 'S30'), # XXX might be too short? ('diffusion', float, (3,)), @@ -265,7 +322,10 @@ # Extra image def fields for 4.2 compared to 4.1 image_def_dtds['V4.2'] = image_def_dtds['V4.1'] + [ - ('label type', int,), # (imagekey!) + ( + 'label type', + int, + ), # (imagekey!) ] #: PAR header versions we claim to understand @@ -275,10 +335,9 @@ image_def_dtype = np.dtype(image_def_dtds['V4.2']) #: slice orientation codes -slice_orientation_codes = Recoder(( # code, label - (1, 'transverse'), - (2, 'sagittal'), - (3, 'coronal')), fields=('code', 'label')) +slice_orientation_codes = Recoder( + ((1, 'transverse'), (2, 'sagittal'), (3, 'coronal')), fields=('code', 'label') # code, label +) class PARRECError(Exception): @@ -290,11 +349,11 @@ class PARRECError(Exception): # Value after colon may be absent -GEN_RE = re.compile(r".\s+(.*?)\s*:\s*(.*)") +GEN_RE = re.compile(r'.\s+(.*?)\s*:\s*(.*)') def _split_header(fobj): - """ Split header into `version`, `gen_dict`, `image_lines` """ + """Split header into `version`, `gen_dict`, `image_lines`""" version = None gen_dict = {} image_lines = [] @@ -326,8 +385,7 @@ def _split_header(fobj): def _process_gen_dict(gen_dict): - """ Process `gen_dict` key, values into `general_info` - """ + """Process `gen_dict` key, values into `general_info`""" general_info = {} for key, value in gen_dict.items(): # get props for this hdr field @@ -347,8 +405,7 @@ def _process_gen_dict(gen_dict): def _process_image_lines(image_lines, version): - """ Process image information definition lines according to `version` - """ + """Process image information definition lines according to `version`""" # postproc image def props image_def_dtd = image_def_dtds[version] # create an array for all image defs @@ -368,7 +425,7 @@ def _process_image_lines(image_lines, version): elif len(props) == 3: name, np_type, shape = props nelements = np.prod(shape) - value = items[item_counter:item_counter + nelements] + value = items[item_counter : item_counter + nelements] value = [np_type(v) for v in value] item_counter += nelements image_defs[name][i] = value @@ -376,7 +433,7 @@ def _process_image_lines(image_lines, version): def vol_numbers(slice_nos): - """ Calculate volume numbers inferred from slice numbers `slice_nos` + """Calculate volume numbers inferred from slice numbers `slice_nos` The volume number for each slice is the number of times this slice number has occurred previously in the `slice_nos` sequence @@ -402,7 +459,7 @@ def vol_numbers(slice_nos): def vol_is_full(slice_nos, slice_max, slice_min=1): - """ Vector with True for slices in complete volume, False otherwise + """Vector with True for slices in complete volume, False otherwise Parameters ---------- @@ -440,10 +497,11 @@ def vol_is_full(slice_nos, slice_max, slice_min=1): def _truncation_checks(general_info, image_defs, permit_truncated): - """ Check for presence of 
truncation in PAR file parameters + """Check for presence of truncation in PAR file parameters Raise error if truncation present and `permit_truncated` is False. """ + def _err_or_warn(msg): if not permit_truncated: raise PARRECError(msg) @@ -457,8 +515,9 @@ def _chk_trunc(idef_name, gdef_max_name): n_expected = general_info[gdef_max_name] if n_have != n_expected: _err_or_warn( - f"Header inconsistency: Found {n_have} {idef_name} " - f"values, but expected {n_expected}") + f'Header inconsistency: Found {n_have} {idef_name} ' + f'values, but expected {n_expected}' + ) _chk_trunc('slice', 'max_slices') _chk_trunc('echo', 'max_echoes') @@ -467,13 +526,12 @@ def _chk_trunc(idef_name, gdef_max_name): _chk_trunc('gradient orientation', 'max_gradient_orient') # Final check for partial volumes - if not np.all(vol_is_full(image_defs['slice number'], - general_info['max_slices'])): - _err_or_warn("Found one or more partial volume(s)") + if not np.all(vol_is_full(image_defs['slice number'], general_info['max_slices'])): + _err_or_warn('Found one or more partial volume(s)') def one_line(long_str): - """ Make maybe mutli-line `long_str` into one long line """ + """Make maybe mutli-line `long_str` into one long line""" return ' '.join(line.strip() for line in long_str.splitlines()) @@ -496,18 +554,22 @@ def parse_PAR_header(fobj): # single pass through the header version, gen_dict, image_lines = _split_header(fobj) if version not in supported_versions: - warnings.warn(one_line( - f""" PAR/REC version '{version}' is currently not supported -- making an + warnings.warn( + one_line( + f""" PAR/REC version '{version}' is currently not supported -- making an attempt to read nevertheless. Please email the NiBabel mailing list, if you are interested in adding support for this version. - """)) + """ + ) + ) general_info = _process_gen_dict(gen_dict) image_defs = _process_image_lines(image_lines, version) return general_info, image_defs -def _data_from_rec(rec_fileobj, in_shape, dtype, slice_indices, out_shape, - scalings=None, mmap=True): +def _data_from_rec( + rec_fileobj, in_shape, dtype, slice_indices, out_shape, scalings=None, mmap=True +): """Load and return array data from REC file Parameters @@ -564,10 +626,8 @@ def exts2pars(exts_source): element contains a PARRECHeader read from the contained extensions. 
""" headers = [] - exts_source = (exts_source.header if hasattr(exts_source, 'header') - else exts_source) - exts_source = (exts_source.extensions if hasattr(exts_source, 'extensions') - else exts_source) + exts_source = exts_source.header if hasattr(exts_source, 'header') else exts_source + exts_source = exts_source.extensions if hasattr(exts_source, 'extensions') else exts_source for extension in exts_source: content = extension.get_content() content = content.decode(getpreferredencoding(False)) @@ -579,9 +639,8 @@ def exts2pars(exts_source): class PARRECArrayProxy: - def __init__(self, file_like, header, *, mmap=True, scaling='dv'): - """ Initialize PARREC array proxy + """Initialize PARREC array proxy Parameters ---------- @@ -663,16 +722,15 @@ def _get_scaled(self, dtype, slicer): # Slice scaling to give output shape return raw_data * slopes[slicer].astype(final_type) + inters[slicer].astype(final_type) - def get_unscaled(self): - """ Read data from file + """Read data from file This is an optional part of the proxy API """ return self._get_unscaled(slicer=()) def __array__(self, dtype=None): - """ Read data from file and apply scaling, casting to ``dtype`` + """Read data from file and apply scaling, casting to ``dtype`` If ``dtype`` is unspecified, the dtype of the returned array is the narrowest dtype that can represent the data without overflow. @@ -700,8 +758,7 @@ def __getitem__(self, slicer): class PARRECHeader(SpatialHeader): """PAR/REC header""" - def __init__(self, info, image_defs, permit_truncated=False, - strict_sort=False): + def __init__(self, info, image_defs, permit_truncated=False, strict_sort=False): """ Parameters ---------- @@ -730,13 +787,15 @@ def __init__(self, info, image_defs, permit_truncated=False, # dtype bitpix = self._get_unique_image_prop('image pixel size') if bitpix not in (8, 16): - raise PARRECError(f'Only 8- and 16-bit data supported (not {bitpix}) ' - 'please report this to the nibabel developers') + raise PARRECError( + f'Only 8- and 16-bit data supported (not {bitpix}) ' + 'please report this to the nibabel developers' + ) # REC data always little endian dt = np.dtype('uint' + str(bitpix)).newbyteorder('<') - super(PARRECHeader, self).__init__(data_dtype=dt, - shape=self._calc_data_shape(), - zooms=self._calc_zooms()) + super(PARRECHeader, self).__init__( + data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms() + ) @classmethod def from_header(klass, header=None): @@ -744,20 +803,20 @@ def from_header(klass, header=None): raise PARRECError('Cannot create PARRECHeader from air.') if type(header) == klass: return header.copy() - raise PARRECError('Cannot create PARREC header from ' - 'non-PARREC header.') + raise PARRECError('Cannot create PARREC header from ' 'non-PARREC header.') @classmethod - def from_fileobj(klass, fileobj, permit_truncated=False, - strict_sort=False): + def from_fileobj(klass, fileobj, permit_truncated=False, strict_sort=False): info, image_defs = parse_PAR_header(fileobj) return klass(info, image_defs, permit_truncated, strict_sort) def copy(self): - return PARRECHeader(deepcopy(self.general_info), - self.image_defs.copy(), - self.permit_truncated, - self.strict_sort) + return PARRECHeader( + deepcopy(self.general_info), + self.image_defs.copy(), + self.permit_truncated, + self.strict_sort, + ) def as_analyze_map(self): """Convert PAR parameters to NIFTI1 format""" @@ -765,12 +824,15 @@ def as_analyze_map(self): # the NIfTI1 header, specifically in nifti1.py `header_dtd` defs. 
# Here we set the parameters we can to simplify PAR/REC # to NIfTI conversion. - descr = (f"{self.general_info['exam_name']};" - f"{self.general_info['patient_name']};" - f"{self.general_info['exam_date'].replace(' ', '')};" - f"{self.general_info['protocol_name']}" - )[:80] # max len - is_fmri = (self.general_info['max_dynamics'] > 1) + descr = ( + f"{self.general_info['exam_name']};" + f"{self.general_info['patient_name']};" + f"{self.general_info['exam_date'].replace(' ', '')};" + f"{self.general_info['protocol_name']}" + )[ + :80 + ] # max len + is_fmri = self.general_info['max_dynamics'] > 1 # PAR/REC uses msec, but in _calc_zooms we convert to sec t = 'sec' if is_fmri else 'unknown' xyzt_units = unit_codes['mm'] + unit_codes[t] @@ -821,14 +883,14 @@ def get_bvals_bvecs(self): else: n_slices, n_vols = self.get_data_shape()[-2:] bvals = self.image_defs['diffusion_b_factor'][reorder].reshape( - (n_slices, n_vols), order='F') + (n_slices, n_vols), order='F' + ) # All bvals within volume should be the same assert not np.any(np.diff(bvals, axis=0)) bvals = bvals[0] if 'diffusion' not in self.image_defs.dtype.names: return bvals, None - bvecs = self.image_defs['diffusion'][reorder].reshape( - (n_slices, n_vols, 3), order='F') + bvecs = self.image_defs['diffusion'][reorder].reshape((n_slices, n_vols, 3), order='F') # All 3 values of bvecs should be same within volume assert not np.any(np.diff(bvecs, axis=0)) bvecs = bvecs[0] @@ -838,12 +900,12 @@ def get_bvals_bvecs(self): return bvals, bvecs def get_def(self, name): - """Return a single image definition field (or None if missing) """ + """Return a single image definition field (or None if missing)""" idef = self.image_defs return idef[name] if name in idef.dtype.names else None def _get_unique_image_prop(self, name): - """ Scan image definitions and return unique value of a property. + """Scan image definitions and return unique value of a property. * Get array for named field of ``self.image_defs``; * Check that all rows in the array are the same and raise error @@ -866,18 +928,19 @@ def _get_unique_image_prop(self, name): """ props = self.image_defs[name] if np.any(np.diff(props, axis=0)): - raise PARRECError(f'Varying {name} in image sequence ' - f'({props}). This is not supported.') + raise PARRECError( + f'Varying {name} in image sequence ' f'({props}). This is not supported.' + ) return props[0] def get_data_offset(self): - """ PAR header always has 0 data offset (into REC file) """ + """PAR header always has 0 data offset (into REC file)""" return 0 def set_data_offset(self, offset): - """ PAR header always has 0 data offset (into REC file) """ + """PAR header always has 0 data offset (into REC file)""" if offset != 0: - raise PARRECError("PAR header assumes offset 0") + raise PARRECError('PAR header assumes offset 0') def _calc_zooms(self): """Compute image zooms from header data. @@ -906,8 +969,8 @@ def _calc_zooms(self): # If 4D dynamic scan, convert time from milliseconds to seconds if len(zooms) > 3 and self.general_info['dyn_scan']: if len(self.general_info['repetition_time']) > 1: - warnings.warn("multiple TRs found in .PAR file") - zooms[3] = self.general_info['repetition_time'][0] / 1000. 
+ warnings.warn('multiple TRs found in .PAR file') + zooms[3] = self.general_info['repetition_time'][0] / 1000.0 return zooms def get_affine(self, origin='scanner'): @@ -948,13 +1011,12 @@ def get_affine(self, origin='scanner'): """ # shape, zooms in original data ordering (ijk ordering) ijk_shape = np.array(self.get_data_shape()[:3]) - to_center = from_matvec(np.eye(3), -(ijk_shape - 1) / 2.) + to_center = from_matvec(np.eye(3), -(ijk_shape - 1) / 2.0) zoomer = np.diag(list(self.get_zooms()[:3]) + [1]) slice_orientation = self.get_slice_orientation() permute_to_psl = ACQ_TO_PSL.get(slice_orientation) if permute_to_psl is None: - raise PARRECError( - f"Unknown slice orientation ({slice_orientation}).") + raise PARRECError(f'Unknown slice orientation ({slice_orientation}).') # hdr has deg, we need radians # Order is [ap, fh, rl] ap_rot, fh_rot, rl_rot = self.general_info['angulation'] * DEG2RAD @@ -974,18 +1036,18 @@ def get_affine(self, origin='scanner'): return np.dot(PSL_TO_RAS, psl_aff) def _get_n_slices(self): - """ Get number of slices for output data """ + """Get number of slices for output data""" return len(set(self.image_defs['slice number'])) def _get_n_vols(self): - """ Get number of volumes for output data """ + """Get number of volumes for output data""" slice_nos = self.image_defs['slice number'] vol_nos = vol_numbers(slice_nos) is_full = vol_is_full(slice_nos, self.general_info['max_slices']) return len(set(np.array(vol_nos)[is_full])) def _calc_data_shape(self): - """ Calculate the output shape of the image data + """Calculate the output shape of the image data Returns length 3 tuple for 3D image, length 4 tuple for 4D. @@ -1010,7 +1072,7 @@ def _calc_data_shape(self): n_vols = self._get_n_vols() return shape + (n_vols,) if n_vols > 1 else shape - def get_data_scaling(self, method="dv"): + def get_data_scaling(self, method='dv'): """Returns scaling slope and intercept. Parameters @@ -1074,7 +1136,7 @@ def get_rec_shape(self): return inplane_shape + (len(self.image_defs),) def _strict_sort_order(self): - """ Determine the sort order based on several image definition fields. + """Determine the sort order based on several image definition fields. 
The fields taken into consideration, if present, are (in order from slowest to fastest variation after sorting): @@ -1112,8 +1174,7 @@ def _strict_sort_order(self): image_type = idefs['image_type_mr'] # sort keys only present in a subset of .PAR files - asl_keys = ((idefs['label type'], ) if 'label type' in - idefs.dtype.names else ()) + asl_keys = (idefs['label type'],) if 'label type' in idefs.dtype.names else () if self.general_info['diffusion'] != 0: bvals = self.get_def('diffusion b value number') if bvals is None: @@ -1121,22 +1182,20 @@ def _strict_sort_order(self): bvecs = self.get_def('gradient orientation number') if bvecs is None: # no b-vectors available - diffusion_keys = (bvals, ) + diffusion_keys = (bvals,) else: diffusion_keys = (bvecs, bvals) else: diffusion_keys = () # initial sort (last key is highest precedence) - keys = (slice_nos, echos, phases) + \ - diffusion_keys + asl_keys + (dynamics, image_type) + keys = (slice_nos, echos, phases) + diffusion_keys + asl_keys + (dynamics, image_type) initial_sort_order = np.lexsort(keys) # sequentially number the volumes based on the initial sort vol_nos = vol_numbers(slice_nos[initial_sort_order]) # identify truncated volumes - is_full = vol_is_full(slice_nos[initial_sort_order], - self.general_info['max_slices']) + is_full = vol_is_full(slice_nos[initial_sort_order], self.general_info['max_slices']) # second stage of sorting return initial_sort_order[np.lexsort((vol_nos, is_full))] @@ -1182,7 +1241,7 @@ def get_sorted_slice_indices(self): return sort_order[:n_used] def get_volume_labels(self): - """ Dynamic labels corresponding to the final data dimension(s). + """Dynamic labels corresponding to the final data dimension(s). This is useful for custom data sorting. A subset of the info in ``self.image_defs`` is returned in an order that matches the final @@ -1200,18 +1259,19 @@ def get_volume_labels(self): image_defs = self.image_defs # define which keys which might vary across image volumes - dynamic_keys = ['cardiac phase number', - 'echo number', - 'label type', - 'image_type_mr', - 'dynamic scan number', - 'scanning sequence', - 'gradient orientation number', - 'diffusion b value number'] + dynamic_keys = [ + 'cardiac phase number', + 'echo number', + 'label type', + 'image_type_mr', + 'dynamic scan number', + 'scanning sequence', + 'gradient orientation number', + 'diffusion b value number', + ] # remove dynamic keys that may not be present in older .PAR versions - dynamic_keys = [d for d in dynamic_keys if d in - image_defs.dtype.fields] + dynamic_keys = [d for d in dynamic_keys if d in image_defs.dtype.fields] non_unique_keys = [] for key in dynamic_keys: @@ -1219,7 +1279,7 @@ def get_volume_labels(self): if ndim == 1: num_unique = len(np.unique(image_defs[key])) else: - raise ValueError("unexpected image_defs shape > 1D") + raise ValueError('unexpected image_defs shape > 1D') if num_unique > 1: non_unique_keys.append(key) @@ -1235,6 +1295,7 @@ def get_volume_labels(self): class PARRECImage(SpatialImage): """PAR/REC image""" + header_class = PARRECHeader valid_exts = ('.rec', '.par') files_types = (('image', '.rec'), ('header', '.par')) @@ -1245,9 +1306,10 @@ class PARRECImage(SpatialImage): ImageArrayProxy = PARRECArrayProxy @classmethod - def from_file_map(klass, file_map, *, mmap=True, permit_truncated=False, - scaling='dv', strict_sort=False): - """ Create PARREC image from file map `file_map` + def from_file_map( + klass, file_map, *, mmap=True, permit_truncated=False, scaling='dv', strict_sort=False + ): + """Create 
PARREC image from file map `file_map` Parameters ---------- @@ -1275,19 +1337,17 @@ def from_file_map(klass, file_map, *, mmap=True, permit_truncated=False, """ with file_map['header'].get_prepare_fileobj('rt') as hdr_fobj: hdr = klass.header_class.from_fileobj( - hdr_fobj, - permit_truncated=permit_truncated, - strict_sort=strict_sort) + hdr_fobj, permit_truncated=permit_truncated, strict_sort=strict_sort + ) rec_fobj = file_map['image'].get_prepare_fileobj() - data = klass.ImageArrayProxy(rec_fobj, hdr, - mmap=mmap, scaling=scaling) - return klass(data, hdr.get_affine(), header=hdr, extra=None, - file_map=file_map) + data = klass.ImageArrayProxy(rec_fobj, hdr, mmap=mmap, scaling=scaling) + return klass(data, hdr.get_affine(), header=hdr, extra=None, file_map=file_map) @classmethod - def from_filename(klass, filename, *, mmap=True, permit_truncated=False, - scaling='dv', strict_sort=False): - """ Create PARREC image from filename `filename` + def from_filename( + klass, filename, *, mmap=True, permit_truncated=False, scaling='dv', strict_sort=False + ): + """Create PARREC image from filename `filename` Parameters ---------- @@ -1313,11 +1373,13 @@ def from_filename(klass, filename, *, mmap=True, permit_truncated=False, the slices appear in the .PAR file. """ file_map = klass.filespec_to_file_map(filename) - return klass.from_file_map(file_map, - mmap=mmap, - permit_truncated=permit_truncated, - scaling=scaling, - strict_sort=strict_sort) + return klass.from_file_map( + file_map, + mmap=mmap, + permit_truncated=permit_truncated, + scaling=scaling, + strict_sort=strict_sort, + ) load = from_filename diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index e28cc6e28d..4d0257f4d6 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -6,13 +6,12 @@ def _cmp(a, b): - """ Implementation of ``cmp`` for Python 3 - """ + """Implementation of ``cmp`` for Python 3""" return (a > b) - (a < b) def cmp_pkg_version(version_str, pkg_version_str=__version__): - """ Compare ``version_str`` to current package version + """Compare ``version_str`` to current package version This comparator follows `PEP-440`_ conventions for determining version ordering. @@ -63,7 +62,7 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): def pkg_commit_hash(pkg_path=None): - """ Get short form of commit hash + """Get short form of commit hash Versioneer placed a ``_version.py`` file in the package directory. This file gets updated on installation or ``git archive``. @@ -98,7 +97,7 @@ def pkg_commit_hash(pkg_path=None): def get_pkg_info(pkg_path): - """ Return dict describing the context of this package + """Return dict describing the context of this package Parameters ---------- @@ -112,6 +111,7 @@ def get_pkg_info(pkg_path): """ src, hsh = pkg_commit_hash() import numpy + return dict( pkg_path=pkg_path, commit_source=src, @@ -119,4 +119,5 @@ def get_pkg_info(pkg_path): sys_version=sys.version, sys_executable=sys.executable, sys_platform=sys.platform, - np_version=numpy.__version__) + np_version=numpy.__version__, + ) diff --git a/nibabel/processing.py b/nibabel/processing.py index b7abfb8c75..336e9b40f1 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Image processing functions for: +"""Image processing functions for: * smoothing * resampling @@ -19,6 +19,7 @@ import numpy.linalg as npl from .optpkg import optional_package + spnd, _, _ = optional_package('scipy.ndimage') from .affines import AffineError, to_matvec, from_matvec, append_diag, rescale_affine @@ -31,7 +32,7 @@ def fwhm2sigma(fwhm): - """ Convert a FWHM value to sigma in a Gaussian kernel. + """Convert a FWHM value to sigma in a Gaussian kernel. Parameters ---------- @@ -54,7 +55,7 @@ def fwhm2sigma(fwhm): def sigma2fwhm(sigma): - """ Convert a sigma in a Gaussian kernel to a FWHM value + """Convert a sigma in a Gaussian kernel to a FWHM value Parameters ---------- @@ -77,7 +78,7 @@ def sigma2fwhm(sigma): def adapt_affine(affine, n_dim): - """ Adapt input / output dimensions of spatial `affine` for `n_dims` + """Adapt input / output dimensions of spatial `affine` for `n_dims` Adapts a spatial (4, 4) affine that is being applied to an image with fewer than 3 spatial dimensions, or more than 3 dimensions. If there are more @@ -112,13 +113,10 @@ def adapt_affine(affine, n_dim): return adapted -def resample_from_to(from_img, - to_vox_map, - order=3, - mode='constant', - cval=0., - out_class=Nifti1Image): - """ Resample image `from_img` to mapped voxel space `to_vox_map` +def resample_from_to( + from_img, to_vox_map, order=3, mode='constant', cval=0.0, out_class=Nifti1Image +): + """Resample image `from_img` to mapped voxel space `to_vox_map` Resample using N-d spline interpolation. @@ -156,8 +154,9 @@ def resample_from_to(from_img, """ # This check requires `shape` attribute of image if not spatial_axes_first(from_img): - raise ValueError('Cannot predict position of spatial axes for Image ' - 'type ' + str(type(from_img))) + raise ValueError( + 'Cannot predict position of spatial axes for Image ' 'type ' + str(type(from_img)) + ) try: to_shape, to_affine = to_vox_map.shape, to_vox_map.affine except AttributeError: @@ -171,23 +170,16 @@ def resample_from_to(from_img, a_from_affine = adapt_affine(from_img.affine, from_n_dim) to_vox2from_vox = npl.inv(a_from_affine).dot(a_to_affine) rzs, trans = to_matvec(to_vox2from_vox) - data = spnd.affine_transform(from_img.dataobj, - rzs, - trans, - to_shape, - order=order, - mode=mode, - cval=cval) + data = spnd.affine_transform( + from_img.dataobj, rzs, trans, to_shape, order=order, mode=mode, cval=cval + ) return out_class(data, to_affine, from_img.header) -def resample_to_output(in_img, - voxel_sizes=None, - order=3, - mode='constant', - cval=0., - out_class=Nifti1Image): - """ Resample image `in_img` to output voxel axes (world space) +def resample_to_output( + in_img, voxel_sizes=None, order=3, mode='constant', cval=0.0, out_class=Nifti1Image +): + """Resample image `in_img` to output voxel axes (world space) Parameters ---------- @@ -243,12 +235,8 @@ def resample_to_output(in_img, return resample_from_to(in_img, out_vox_map, order, mode, cval, out_class) -def smooth_image(img, - fwhm, - mode='nearest', - cval=0., - out_class=Nifti1Image): - """ Smooth image `img` along voxel axes by FWHM `fwhm` millimeters +def smooth_image(img, fwhm, mode='nearest', cval=0.0, out_class=Nifti1Image): + """Smooth image `img` along voxel axes by FWHM `fwhm` millimeters Parameters ---------- @@ -287,8 +275,9 @@ def smooth_image(img, """ # This check requires `shape` attribute of image if not spatial_axes_first(img): - raise ValueError('Cannot predict position of spatial 
axes for Image ' - 'type ' + str(type(img))) + raise ValueError( + 'Cannot predict position of spatial axes for Image ' 'type ' + str(type(img)) + ) if out_class is None: out_class = img.__class__ n_dim = len(img.shape) @@ -301,26 +290,25 @@ def smooth_image(img, fwhm[:3] = fwhm_scalar # Voxel sizes RZS = img.affine[:, :n_dim] - vox = np.sqrt(np.sum(RZS ** 2, 0)) + vox = np.sqrt(np.sum(RZS**2, 0)) # Smoothing in terms of voxels vox_fwhm = fwhm / vox vox_sd = fwhm2sigma(vox_fwhm) # Do the smoothing - sm_data = spnd.gaussian_filter(img.dataobj, - vox_sd, - mode=mode, - cval=cval) + sm_data = spnd.gaussian_filter(img.dataobj, vox_sd, mode=mode, cval=cval) return out_class(sm_data, img.affine, img.header) -def conform(from_img, - out_shape=(256, 256, 256), - voxel_size=(1.0, 1.0, 1.0), - order=3, - cval=0.0, - orientation='RAS', - out_class=None): - """ Resample image to ``out_shape`` with voxels of size ``voxel_size``. +def conform( + from_img, + out_shape=(256, 256, 256), + voxel_size=(1.0, 1.0, 1.0), + order=3, + cval=0.0, + orientation='RAS', + out_class=None, +): + """Resample image to ``out_shape`` with voxels of size ``voxel_size``. Using the default arguments, this function is meant to replicate most parts of FreeSurfer's ``mri_convert --conform`` command. Specifically, this @@ -367,11 +355,11 @@ def conform(from_img, # are written. required_ndim = 3 if from_img.ndim != required_ndim: - raise ValueError("Only 3D images are supported.") + raise ValueError('Only 3D images are supported.') elif len(out_shape) != required_ndim: - raise ValueError(f"`out_shape` must have {required_ndim} values") + raise ValueError(f'`out_shape` must have {required_ndim} values') elif len(voxel_size) != required_ndim: - raise ValueError(f"`voxel_size` must have {required_ndim} values") + raise ValueError(f'`voxel_size` must have {required_ndim} values') start_ornt = io_orientation(from_img.affine) end_ornt = axcodes2ornt(orientation) @@ -384,7 +372,12 @@ def conform(from_img, # Resample input image. out_img = resample_from_to( - from_img=from_img, to_vox_map=(out_shape, out_aff), order=order, mode="constant", - cval=cval, out_class=out_class) + from_img=from_img, + to_vox_map=(out_shape, out_aff), + order=order, + mode='constant', + cval=cval, + out_class=out_class, + ) return out_img diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index 5f827e2bbf..a58c2fdba9 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -1,4 +1,4 @@ -""" Adapter module for working with pydicom < 1.0 and >= 1.0 +"""Adapter module for working with pydicom < 1.0 and >= 1.0 In what follows, "dicom is available" means we can import either a) ``dicom`` (pydicom < 1.0) or or b) ``pydicom`` (pydicom >= 1.0). 
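The import guards in this adapter module lean on ``optional_package`` (reformatted in the optpkg.py hunks above); a minimal usage sketch:

    from nibabel.optpkg import optional_package

    # Returns (module_or_tripwire, have_pkg, setup_module); if pydicom is
    # absent, touching the first value raises an informative error.
    pydicom, have_pydicom, _setup = optional_package('pydicom')
    if have_pydicom:
        print(pydicom.__version__)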
@@ -35,6 +35,7 @@ else: # pydicom module available from pydicom.dicomio import read_file from pydicom.sequence import Sequence + # Values not imported by default import pydicom.values @@ -42,9 +43,11 @@ tag_for_keyword = pydicom.datadict.tag_for_keyword -@deprecate_with_version("dicom_test has been moved to nibabel.nicom.tests", - since="3.1", until="5.0") +@deprecate_with_version( + 'dicom_test has been moved to nibabel.nicom.tests', since='3.1', until='5.0' +) def dicom_test(func): # Import locally to avoid circular dependency from .nicom.tests import dicom_test + return dicom_test(func) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index 1b8e8b0454..7ae9a3c63a 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -33,7 +33,7 @@ def fillpositive(xyz, w2_thresh=None): - """ Compute unit quaternion from last 3 values + """Compute unit quaternion from last 3 values Parameters ---------- @@ -104,7 +104,7 @@ def fillpositive(xyz, w2_thresh=None): def quat2mat(q): - """ Calculate rotation matrix corresponding to quaternion + """Calculate rotation matrix corresponding to quaternion Parameters ---------- @@ -147,13 +147,17 @@ def quat2mat(q): wX, wY, wZ = w * X, w * Y, w * Z xX, xY, xZ = x * X, x * Y, x * Z yY, yZ, zZ = y * Y, y * Z, z * Z - return np.array([[1.0 - (yY + zZ), xY - wZ, xZ + wY], - [xY + wZ, 1.0 - (xX + zZ), yZ - wX], - [xZ - wY, yZ + wX, 1.0 - (xX + yY)]]) + return np.array( + [ + [1.0 - (yY + zZ), xY - wZ, xZ + wY], + [xY + wZ, 1.0 - (xX + zZ), yZ - wX], + [xZ - wY, yZ + wX, 1.0 - (xX + yY)], + ] + ) def mat2quat(M): - """ Calculate quaternion corresponding to given rotation matrix + """Calculate quaternion corresponding to given rotation matrix Parameters ---------- @@ -201,12 +205,17 @@ def mat2quat(M): # M[0,1]. The notation is from the Wikipedia article. 
Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat # Fill only lower half of symmetric matrix - K = np.array([ - [Qxx - Qyy - Qzz, 0, 0, 0], - [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0], - [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0], - [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz]] - ) / 3.0 + K = ( + np.array( + [ + [Qxx - Qyy - Qzz, 0, 0, 0], + [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0], + [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0], + [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz], + ] + ) + / 3.0 + ) # Use Hermitian eigenvectors, values for speed vals, vecs = np.linalg.eigh(K) # Select largest eigenvector, reorder to w,x,y,z quaternion @@ -219,7 +228,7 @@ def mat2quat(M): def mult(q1, q2): - """ Multiply two quaternions + """Multiply two quaternions Parameters ---------- @@ -244,7 +253,7 @@ def mult(q1, q2): def conjugate(q): - """ Conjugate of quaternion + """Conjugate of quaternion Parameters ---------- @@ -260,7 +269,7 @@ def conjugate(q): def norm(q): - """ Return norm of quaternion + """Return norm of quaternion Parameters ---------- @@ -276,12 +285,12 @@ def norm(q): def isunit(q): - """ Return True is this is very nearly a unit quaternion """ + """Return True is this is very nearly a unit quaternion""" return np.allclose(norm(q), 1) def inverse(q): - """ Return multiplicative inverse of quaternion `q` + """Return multiplicative inverse of quaternion `q` Parameters ---------- @@ -297,12 +306,12 @@ def inverse(q): def eye(): - """ Return identity quaternion """ + """Return identity quaternion""" return np.array([1.0, 0, 0, 0]) def rotate_vector(v, q): - """ Apply transformation in quaternion `q` to vector `v` + """Apply transformation in quaternion `q` to vector `v` Parameters ---------- @@ -328,7 +337,7 @@ def rotate_vector(v, q): def nearly_equivalent(q1, q2, rtol=1e-5, atol=1e-8): - """ Returns True if `q1` and `q2` give near equivalent transforms + """Returns True if `q1` and `q2` give near equivalent transforms `q1` may be nearly numerically equal to `q2`, or nearly equal to `q2` * -1 (because a quaternion multiplied by -1 gives the same transform). 
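The sign ambiguity this helper allows for is easy to demonstrate; the angle and axis below are arbitrary:

    import numpy as np
    from nibabel import quaternions as nq

    q = nq.angle_axis2quat(np.pi / 3, np.array([0, 0, 1.0]))
    # q and -q encode the same rotation, so their matrices agree
    assert np.allclose(nq.quat2mat(q), nq.quat2mat(-q))
    assert nq.nearly_equivalent(q, -q)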
@@ -363,7 +372,7 @@ def nearly_equivalent(q1, q2, rtol=1e-5, atol=1e-8): def angle_axis2quat(theta, vector, is_normalized=False): - """ Quaternion for rotation of angle `theta` around `vector` + """Quaternion for rotation of angle `theta` around `vector` Parameters ---------- @@ -398,12 +407,11 @@ def angle_axis2quat(theta, vector, is_normalized=False): vector = vector / math.sqrt(np.dot(vector, vector)) t2 = theta / 2.0 st2 = math.sin(t2) - return np.concatenate(([math.cos(t2)], - vector * st2)) + return np.concatenate(([math.cos(t2)], vector * st2)) def angle_axis2mat(theta, vector, is_normalized=False): - """ Rotation matrix of angle `theta` around `vector` + """Rotation matrix of angle `theta` around `vector` Parameters ---------- @@ -435,13 +443,17 @@ def angle_axis2mat(theta, vector, is_normalized=False): xs, ys, zs = x * s, y * s, z * s xC, yC, zC = x * C, y * C, z * C xyC, yzC, zxC = x * yC, y * zC, z * xC - return np.array([[x * xC + c, xyC - zs, zxC + ys], - [xyC + zs, y * yC + c, yzC - xs], - [zxC - ys, yzC + xs, z * zC + c]]) + return np.array( + [ + [x * xC + c, xyC - zs, zxC + ys], + [xyC + zs, y * yC + c, yzC - xs], + [zxC - ys, yzC + xs, z * zC + c], + ] + ) def quat2angle_axis(quat, identity_thresh=None): - """ Convert quaternion to rotation of angle around axis + """Convert quaternion to rotation of angle around axis Parameters ---------- diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py index 1e4033b676..a63894cef8 100644 --- a/nibabel/rstutils.py +++ b/nibabel/rstutils.py @@ -1,4 +1,4 @@ -""" ReStructured Text utilities +"""ReStructured Text utilities * Make ReST table given array of values """ @@ -6,14 +6,10 @@ import numpy as np -def rst_table(cell_values, - row_names=None, - col_names=None, - title='', - val_fmt='{0:5.2f}', - format_chars=None - ): - """ Return string for ReST table with entries `cell_values` +def rst_table( + cell_values, row_names=None, col_names=None, title='', val_fmt='{0:5.2f}', format_chars=None +): + """Return string for ReST table with entries `cell_values` Parameters ---------- @@ -82,36 +78,26 @@ def rst_table(cell_values, if max_len > col_len: col_len = max_len row_str_list.append(row_strs) - row_name_fmt = "{0:<" + str(row_len) + "}" + row_name_fmt = '{0:<' + str(row_len) + '}' row_names = [row_name_fmt.format(name) for name in row_names] - col_name_fmt = "{0:^" + str(col_len) + "}" + col_name_fmt = '{0:^' + str(col_len) + '}' col_names = [col_name_fmt.format(name) for name in col_names] col_headings = [' ' * row_len] + col_names col_header = down_joiner.join(col_headings) row_val_fmt = '{0:<' + str(col_len) + '}' table_strs = [] if title != '': - table_strs += [title_heading * len(title), - title, - title_heading * len(title), - ''] + table_strs += [title_heading * len(title), title, title_heading * len(title), ''] along_headings = [along * len(h) for h in col_headings] - crossed_line = (cross_starter + - cross_joiner.join(along_headings) + - cross_ender) + crossed_line = cross_starter + cross_joiner.join(along_headings) + cross_ender thick_long_headings = [thick_long * len(h) for h in col_headings] - crossed_thick_line = (cross_thick_starter + - cross_thick_joiner.join(thick_long_headings) + - cross_thick_ender) - table_strs += [crossed_line, - down_starter + col_header + down_ender, - crossed_thick_line] + crossed_thick_line = ( + cross_thick_starter + cross_thick_joiner.join(thick_long_headings) + cross_thick_ender + ) + table_strs += [crossed_line, down_starter + col_header + down_ender, crossed_thick_line] for row_no, row_name 
in enumerate(row_names): - row_vals = [row_val_fmt.format(row_str) - for row_str in row_str_list[row_no]] - row_line = (down_starter + - down_joiner.join([row_name] + row_vals) + - down_ender) + row_vals = [row_val_fmt.format(row_str) for row_str in row_str_list[row_no]] + row_line = down_starter + down_joiner.join([row_name] + row_vals) + down_ender table_strs.append(row_line) table_strs.append(crossed_line) return '\n'.join(table_strs) diff --git a/nibabel/spaces.py b/nibabel/spaces.py index dac8fdd049..d06a39b0ed 100644 --- a/nibabel/spaces.py +++ b/nibabel/spaces.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Routines to work with spaces +"""Routines to work with spaces A space is defined by coordinate axes. @@ -28,7 +28,7 @@ def vox2out_vox(mapped_voxels, voxel_sizes=None): - """ output-aligned shape, affine for input implied by `mapped_voxels` + """output-aligned shape, affine for input implied by `mapped_voxels` The input (voxel) space, and the affine mapping to output space, are given in `mapped_voxels`. @@ -95,7 +95,7 @@ def vox2out_vox(mapped_voxels, voxel_sizes=None): def slice2volume(index, axis, shape=None): - """ Affine expressing selection of a single slice from 3D volume + """Affine expressing selection of a single slice from 3D volume Imagine we have taken a slice from an image data array, ``s = data[:, :, index]``. This function returns the affine to map the array coordinates of @@ -129,9 +129,9 @@ def slice2volume(index, axis, shape=None): the embedded volume """ if index < 0: - raise ValueError("Cannot handle negative index") + raise ValueError('Cannot handle negative index') if not 0 <= axis <= 2: - raise ValueError("Axis should be between 0 and 2") + raise ValueError('Axis should be between 0 and 2') axes = list(range(4)) axes.remove(axis) slice_aff = np.eye(4)[:, axes] diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 09744d0149..7977943ffd 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" A simple spatial image class +"""A simple spatial image class The image class maintains the association between a 3D (or greater) array, and an affine transform that maps voxel coordinates to some world space. 
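In brief, the array/affine association described here works as follows; the shape and zooms are invented for the example:

    import numpy as np
    import nibabel as nib
    from nibabel.affines import apply_affine

    data = np.zeros((4, 5, 6), dtype=np.float32)
    img = nib.Nifti1Image(data, np.diag([2.0, 2.0, 2.0, 1.0]))
    # The affine maps voxel indices (i, j, k) to world coordinates
    print(apply_affine(img.affine, [1, 2, 3]))  # [2. 4. 6.]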
@@ -127,7 +127,6 @@ >>> img3 = nib.AnalyzeImage.from_file_map(file_map) >>> np.all(img3.get_fdata(dtype=np.float32) == data) True - """ import numpy as np @@ -142,22 +141,20 @@ class HeaderDataError(Exception): - """ Class to indicate error in getting or setting header data """ + """Class to indicate error in getting or setting header data""" class HeaderTypeError(Exception): - """ Class to indicate error in parameters into header functions """ + """Class to indicate error in parameters into header functions""" class SpatialHeader(FileBasedHeader): - """ Template class to implement header protocol """ + """Template class to implement header protocol""" + default_x_flip = True data_layout = 'F' - def __init__(self, - data_dtype=np.float32, - shape=(0,), - zooms=None): + def __init__(self, data_dtype=np.float32, shape=(0,), zooms=None): self.set_data_dtype(data_dtype) self._zooms = () self.set_data_shape(shape) @@ -174,9 +171,7 @@ def from_header(klass, header=None): # different field names if type(header) == klass: return header.copy() - return klass(header.get_data_dtype(), - header.get_data_shape(), - header.get_zooms()) + return klass(header.get_data_dtype(), header.get_data_shape(), header.get_zooms()) @classmethod def from_fileobj(klass, fileobj): @@ -186,18 +181,17 @@ def write_to(self, fileobj): raise NotImplementedError def __eq__(self, other): - return ((self.get_data_dtype(), - self.get_data_shape(), - self.get_zooms()) == - (other.get_data_dtype(), - other.get_data_shape(), - other.get_zooms())) + return (self.get_data_dtype(), self.get_data_shape(), self.get_zooms()) == ( + other.get_data_dtype(), + other.get_data_shape(), + other.get_zooms(), + ) def __ne__(self, other): return not self == other def copy(self): - """ Copy object to independent representation + """Copy object to independent representation The copy should not be affected by any changes to the original object. 
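A quick sketch of the copy and equality semantics above (illustrative only; the dtype, shape and zooms are arbitrary):

    >>> import numpy as np
    >>> from nibabel.spatialimages import SpatialHeader
    >>> hdr = SpatialHeader(np.float32, shape=(2, 3, 4), zooms=(1.0, 2.0, 3.0))
    >>> hdr2 = hdr.copy()
    >>> hdr2 == hdr  # equality compares dtype, shape and zooms
    True
    >>> hdr2.set_data_shape((5, 5, 5))
    >>> hdr == hdr2  # the copy is independent of the original
    False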
@@ -232,8 +226,7 @@ def set_zooms(self, zooms): shape = self.get_data_shape() ndim = len(shape) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' - % (ndim, ndim)) + raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) if len([z for z in zooms if z < 0]): raise HeaderDataError('zooms must be positive') self._zooms = zooms @@ -241,13 +234,12 @@ def set_zooms(self, zooms): def get_base_affine(self): shape = self.get_data_shape() zooms = self.get_zooms() - return shape_zoom_affine(shape, zooms, - self.default_x_flip) + return shape_zoom_affine(shape, zooms, self.default_x_flip) get_best_affine = get_base_affine def data_to_fileobj(self, data, fileobj, rescale=True): - """ Write array data `data` as binary to `fileobj` + """Write array data `data` as binary to `fileobj` Parameters ---------- @@ -264,7 +256,7 @@ def data_to_fileobj(self, data, fileobj, rescale=True): fileobj.write(data.astype(dtype).tobytes(order=self.data_layout)) def data_from_fileobj(self, fileobj): - """ Read binary image data from `fileobj` """ + """Read binary image data from `fileobj`""" dtype = self.get_data_dtype() shape = self.get_data_shape() data_size = int(np.prod(shape) * dtype.itemsize) @@ -273,7 +265,7 @@ def data_from_fileobj(self, fileobj): def supported_np_types(obj): - """ Numpy data types that instance `obj` supports + """Numpy data types that instance `obj` supports Parameters ---------- @@ -308,16 +300,20 @@ class ImageDataError(Exception): class SpatialFirstSlicer: - """ Slicing interface that returns a new image with an updated affine + """Slicing interface that returns a new image with an updated affine Checks that an image's first three axes are spatial """ + def __init__(self, img): # Local import to avoid circular import on module load from .imageclasses import spatial_axes_first + if not spatial_axes_first(img): - raise ValueError("Cannot predict position of spatial axes for " - "Image type " + img.__class__.__name__) + raise ValueError( + 'Cannot predict position of spatial axes for ' + 'Image type ' + img.__class__.__name__ + ) self.img = img def __getitem__(self, slicer): @@ -328,13 +324,13 @@ def __getitem__(self, slicer): dataobj = self.img.dataobj[slicer] if any(dim == 0 for dim in dataobj.shape): - raise IndexError("Empty slice requested") + raise IndexError('Empty slice requested') affine = self.slice_affine(slicer) return self.img.__class__(dataobj.copy(), affine, self.img.header) def check_slicing(self, slicer, return_spatial=False): - """ Canonicalize slicers and check for scalar indices in spatial dims + """Canonicalize slicers and check for scalar indices in spatial dims Parameters ---------- @@ -357,14 +353,15 @@ def check_slicing(self, slicer, return_spatial=False): spatial_slices = slicer[:3] for subslicer in spatial_slices: if subslicer is None: - raise IndexError("New axis not permitted in spatial dimensions") + raise IndexError('New axis not permitted in spatial dimensions') elif isinstance(subslicer, int): - raise IndexError("Scalar indices disallowed in spatial dimensions; " - "Use `[x]` or `x:x+1`.") + raise IndexError( + 'Scalar indices disallowed in spatial dimensions; ' 'Use `[x]` or `x:x+1`.' + ) return spatial_slices if return_spatial else slicer def slice_affine(self, slicer): - """ Retrieve affine for current image, if sliced by a given index + """Retrieve affine for current image, if sliced by a given index Applies scaling if down-sampling is applied, and adjusts the intercept to account for any cropping. 
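As a rough usage sketch of this slicing machinery (assuming a ``Nifti1Image``; the shape and indices are arbitrary): cropping shifts the translation column, and subsampling scales the matching diagonal entry.

    >>> import numpy as np
    >>> import nibabel as nib
    >>> img = nib.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
    >>> small = img.slicer[2:8, 2:8, ::2]
    >>> small.shape
    (6, 6, 5)
    >>> small.affine[:3, 3]  # intercept adjusted for the crop
    array([2., 2., 0.])
    >>> np.diag(small.affine)[:3]  # step of 2 doubles the scaling on that axis
    array([1., 1., 2.])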
@@ -392,7 +389,7 @@ def slice_affine(self, slicer): for i, subslicer in enumerate(slicer): if isinstance(subslicer, slice): if subslicer.step == 0: - raise ValueError("slice step cannot be 0") + raise ValueError('slice step cannot be 0') transform[i, i] = subslicer.step if subslicer.step is not None else 1 transform[i, 3] = subslicer.start or 0 # If slicer is None, nothing to do @@ -401,13 +398,13 @@ def slice_affine(self, slicer): class SpatialImage(DataobjImage): - """ Template class for volumetric (3D/4D) images """ + """Template class for volumetric (3D/4D) images""" + header_class = SpatialHeader ImageSlicer = SpatialFirstSlicer - def __init__(self, dataobj, affine, header=None, - extra=None, file_map=None): - """ Initialize image + def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): + """Initialize image The image is a combination of (array-like, affine matrix, header), with optional metadata in `extra`, and filename / file-like objects @@ -432,8 +429,7 @@ def __init__(self, dataobj, affine, header=None, file_map : mapping, optional mapping giving file information for this image format """ - super(SpatialImage, self).__init__(dataobj, header=header, extra=extra, - file_map=file_map) + super(SpatialImage, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) if affine is not None: # Check that affine is array-like 4,4. Maybe this is too strict at # this abstract level, but so far I think all image formats we know @@ -458,7 +454,7 @@ def affine(self): return self._affine def update_header(self): - """ Harmonize header with image data and affine + """Harmonize header with image data and affine >>> data = np.zeros((2,3,4)) >>> affine = np.diag([1.0,2.0,3.0,1.0]) @@ -487,7 +483,7 @@ def update_header(self): self._affine2header() def _affine2header(self): - """ Unconditionally set affine into the header """ + """Unconditionally set affine into the header""" RZS = self._affine[:3, :3] vox = np.sqrt(np.sum(RZS * RZS, axis=0)) hdr = self._header @@ -499,12 +495,16 @@ def _affine2header(self): def __str__(self): shape = self.shape affine = self.affine - return '\n'.join((str(self.__class__), - f'data shape {shape}', - 'affine: ', - str(affine), - 'metadata:', - str(self._header))) + return '\n'.join( + ( + str(self.__class__), + f'data shape {shape}', + 'affine: ', + str(affine), + 'metadata:', + str(self._header), + ) + ) def get_data_dtype(self): return self._header.get_data_dtype() @@ -514,7 +514,7 @@ def set_data_dtype(self, dtype): @classmethod def from_image(klass, img): - """ Class method to create new instance of own class from `img` + """Class method to create new instance of own class from `img` Parameters ---------- @@ -527,14 +527,16 @@ def from_image(klass, img): cimg : ``spatialimage`` instance Image, of our own class """ - return klass(img.dataobj, - img.affine, - klass.header_class.from_header(img.header), - extra=img.extra.copy()) + return klass( + img.dataobj, + img.affine, + klass.header_class.from_header(img.header), + extra=img.extra.copy(), + ) @property def slicer(self): - """ Slicer object that returns cropped and subsampled images + """Slicer object that returns cropped and subsampled images The image is resliced in the current orientation; no rotation or resampling is performed, and no attempt is made to filter the image @@ -553,16 +555,17 @@ def slicer(self): return self.ImageSlicer(self) def __getitem__(self, idx): - """ No slicing or dictionary interface for images + """No slicing or dictionary interface for images Use the 
slicer attribute to perform cropping and subsampling at your own risk. """ raise TypeError( - "Cannot slice image objects; consider using `img.slicer[slice]` " - "to generate a sliced image (see documentation for caveats) or " - "slicing image array data with `img.dataobj[slice]` or " - "`img.get_fdata()[slice]`") + 'Cannot slice image objects; consider using `img.slicer[slice]` ' + 'to generate a sliced image (see documentation for caveats) or ' + 'slicing image array data with `img.dataobj[slice]` or ' + '`img.get_fdata()[slice]`' + ) def orthoview(self): """Plot the image using OrthoSlicer3D @@ -578,8 +581,7 @@ def orthoview(self): consider using viewer.show() (equivalently plt.show()) to show the figure. """ - return OrthoSlicer3D(self.dataobj, self.affine, - title=self.get_filename()) + return OrthoSlicer3D(self.dataobj, self.affine, title=self.get_filename()) def as_reoriented(self, ornt): """Apply an orientation change and return a new image diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index 6786b19a0c..67389403b9 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -6,24 +6,20 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to SPM2 version of analyze image format """ +"""Read / write access to SPM2 version of analyze image format""" import numpy as np from . import spm99analyze as spm99 # module import image_dimension_dtd = spm99.image_dimension_dtd[:] -image_dimension_dtd[ - image_dimension_dtd.index(('funused2', 'f4')) -] = ('scl_inter', 'f4') +image_dimension_dtd[image_dimension_dtd.index(('funused2', 'f4'))] = ('scl_inter', 'f4') # Full header numpy dtype combined across sub-fields -header_dtype = np.dtype(spm99.header_key_dtd + - image_dimension_dtd + - spm99.data_history_dtd) +header_dtype = np.dtype(spm99.header_key_dtd + image_dimension_dtd + spm99.data_history_dtd) class Spm2AnalyzeHeader(spm99.Spm99AnalyzeHeader): - """ Class for SPM2 variant of basic Analyze header + """Class for SPM2 variant of basic Analyze header SPM2 variant adds the following to basic Analyze format: @@ -36,7 +32,7 @@ class Spm2AnalyzeHeader(spm99.Spm99AnalyzeHeader): template_dtype = header_dtype def get_slope_inter(self): - """ Get data scaling (slope) and intercept from header data + """Get data scaling (slope) and intercept from header data Uses the algorithm from SPM2 spm_vol_ana.m by John Ashburner @@ -118,16 +114,19 @@ def may_contain_header(klass, binaryblock): if len(binaryblock) < klass.sizeof_hdr: return False - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, - buffer=binaryblock[:klass.sizeof_hdr]) + hdr_struct = np.ndarray( + shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr] + ) bs_hdr_struct = hdr_struct.byteswap() - return (binaryblock[344:348] not in (b'ni1\x00', b'n+1\x00') and - 348 in (hdr_struct['sizeof_hdr'], bs_hdr_struct['sizeof_hdr'])) + return binaryblock[344:348] not in (b'ni1\x00', b'n+1\x00') and 348 in ( + hdr_struct['sizeof_hdr'], + bs_hdr_struct['sizeof_hdr'], + ) class Spm2AnalyzeImage(spm99.Spm99AnalyzeImage): - """ Class for SPM2 variant of basic Analyze image - """ + """Class for SPM2 variant of basic Analyze image""" + header_class = Spm2AnalyzeHeader diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index b858a5efff..1f9d7a3589 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to SPM99 version of analyze image format """ +"""Read / write access to SPM99 version of analyze image format""" import warnings import numpy as np @@ -17,29 +17,25 @@ from .batteryrunners import Report from . import analyze # module import from .optpkg import optional_package + have_scipy = optional_package('scipy')[1] """ Support subtle variations of SPM version of Analyze """ header_key_dtd = analyze.header_key_dtd # funused1 in dime subfield is scalefactor image_dimension_dtd = analyze.image_dimension_dtd[:] -image_dimension_dtd[ - image_dimension_dtd.index(('funused1', 'f4')) -] = ('scl_slope', 'f4') +image_dimension_dtd[image_dimension_dtd.index(('funused1', 'f4'))] = ('scl_slope', 'f4') # originator text field used as image origin (translations) data_history_dtd = analyze.data_history_dtd[:] -data_history_dtd[ - data_history_dtd.index(('originator', 'S10')) -] = ('origin', 'i2', (5,)) +data_history_dtd[data_history_dtd.index(('originator', 'S10'))] = ('origin', 'i2', (5,)) # Full header numpy dtype combined across sub-fields -header_dtype = np.dtype(header_key_dtd + - image_dimension_dtd + - data_history_dtd) +header_dtype = np.dtype(header_key_dtd + image_dimension_dtd + data_history_dtd) class SpmAnalyzeHeader(analyze.AnalyzeHeader): - """ Basic scaling Spm Analyze header """ + """Basic scaling Spm Analyze header""" + # Copies of module level definitions template_dtype = header_dtype @@ -49,13 +45,13 @@ class SpmAnalyzeHeader(analyze.AnalyzeHeader): @classmethod def default_structarr(klass, endianness=None): - """ Create empty header binary block with given endianness """ + """Create empty header binary block with given endianness""" hdr_data = super(SpmAnalyzeHeader, klass).default_structarr(endianness) hdr_data['scl_slope'] = 1 return hdr_data def get_slope_inter(self): - """ Get scalefactor and intercept + """Get scalefactor and intercept If scalefactor is 0.0 return None to indicate no scalefactor. Intercept is always None because SPM99 analyze cannot store intercepts. @@ -67,7 +63,7 @@ def get_slope_inter(self): return slope, None def set_slope_inter(self, slope, inter=None): - """ Set slope and / or intercept into header + """Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -93,12 +89,11 @@ def set_slope_inter(self, slope, inter=None): self._structarr['scl_slope'] = slope if inter in (None, 0) or np.isnan(inter): return - raise HeaderTypeError('Cannot set non-zero intercept ' - 'for SPM headers') + raise HeaderTypeError('Cannot set non-zero intercept ' 'for SPM headers') class Spm99AnalyzeHeader(SpmAnalyzeHeader): - """ Class for SPM99 variant of basic Analyze header + """Class for SPM99 variant of basic Analyze header SPM99 variant adds the following to basic Analyze format: @@ -107,7 +102,7 @@ class Spm99AnalyzeHeader(SpmAnalyzeHeader): """ def get_origin_affine(self): - """ Get affine from header, using SPM origin field if sensible + """Get affine from header, using SPM origin field if sensible The default translations are got from the ``origin`` field, if set, or from the center of the image otherwise. 
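A sketch of the behaviour described here (values worked out by hand from the rules above; the shape and zooms are arbitrary): with no origin set, the translations put the centre of the image at the world origin, and the default x flip negates the first axis.

    >>> from nibabel.spm99analyze import Spm99AnalyzeHeader
    >>> hdr = Spm99AnalyzeHeader()
    >>> hdr.set_data_shape((3, 5, 7))
    >>> hdr.set_zooms((3, 2, 1))
    >>> hdr.get_origin_affine()
    array([[-3.,  0.,  0.,  3.],
           [ 0.,  2.,  0., -4.],
           [ 0.,  0.,  1., -3.],
           [ 0.,  0.,  0.,  1.]])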
@@ -146,8 +141,7 @@ def get_origin_affine(self): # Remember that the origin is for matlab (1-based indexing) origin = hdr['origin'][:3] dims = hdr['dim'][1:4] - if (np.any(origin) and - np.all(origin > -dims) and np.all(origin < dims * 2)): + if np.any(origin) and np.all(origin > -dims) and np.all(origin < dims * 2): origin = origin - 1 else: origin = (dims - 1) / 2.0 @@ -159,7 +153,7 @@ def get_origin_affine(self): get_best_affine = get_origin_affine def set_origin_from_affine(self, affine): - """ Set SPM origin to header from affine matrix. + """Set SPM origin to header from affine matrix. The ``origin`` field was read but not written by SPM99 and 2. It was used for storing a central voxel coordinate, that could be used in @@ -221,8 +215,7 @@ def _chk_origin(hdr, fix=False): rep = Report(HeaderDataError) origin = hdr['origin'][0:3] dims = hdr['dim'][1:4] - if (not np.any(origin) or - (np.all(origin > -dims) and np.all(origin < dims * 2))): + if not np.any(origin) or (np.all(origin > -dims) and np.all(origin < dims * 2)): return hdr, rep rep.problem_level = 20 rep.problem_msg = 'very large origin values relative to dims' @@ -232,19 +225,17 @@ def _chk_origin(hdr, fix=False): class Spm99AnalyzeImage(analyze.AnalyzeImage): - """ Class for SPM99 variant of basic Analyze image - """ + """Class for SPM99 variant of basic Analyze image""" + header_class = Spm99AnalyzeHeader - files_types = (('image', '.img'), - ('header', '.hdr'), - ('mat', '.mat')) + files_types = (('image', '.img'), ('header', '.hdr'), ('mat', '.mat')) has_affine = True makeable = True rw = have_scipy @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Class method to create image from mapping in ``file_map`` + """Class method to create image from mapping in ``file_map`` Parameters ---------- @@ -275,7 +266,8 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): """ ret = super(Spm99AnalyzeImage, klass).from_file_map( - file_map, mmap=mmap, keep_file_open=keep_file_open) + file_map, mmap=mmap, keep_file_open=keep_file_open + ) try: matf = file_map['mat'].get_prepare_fileobj() except OSError: @@ -286,12 +278,12 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): if len(contents) == 0: return ret import scipy.io as sio + mats = sio.loadmat(BytesIO(contents)) if 'mat' in mats: # this overrides a 'M', and includes any flip mat = mats['mat'] if mat.ndim > 2: - warnings.warn('More than one affine in "mat" matrix, ' - 'using first') + warnings.warn('More than one affine in "mat" matrix, ' 'using first') mat = mat[:, :, 0] ret._affine = mat elif 'M' in mats: # the 'M' matrix does not include flips @@ -309,7 +301,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): return ret def to_file_map(self, file_map=None, dtype=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Extends Analyze ``to_file_map`` method by writing ``mat`` file @@ -326,6 +318,7 @@ def to_file_map(self, file_map=None, dtype=None): if mat is None: return import scipy.io as sio + hdr = self._header if hdr.default_x_flip: M = np.dot(np.diag([-1, 1, 1, 1]), mat) diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 860ae2cb39..5e8d87b671 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -1,4 +1,4 @@ -""" Multiformat-capable streamline format read / write interface +"""Multiformat-capable streamline format read / write 
interface
"""
import os
import warnings
@@ -12,13 +12,11 @@
 from .tck import TckFile
 
 # List of all supported formats
-FORMATS = {".trk": TrkFile,
-           ".tck": TckFile
-           }
+FORMATS = {'.trk': TrkFile, '.tck': TckFile}
 
 
 def is_supported(fileobj):
-    """ Checks if the file-like object if supported by NiBabel.
+    """Checks if the file-like object is supported by NiBabel.
 
     Parameters
     ----------
@@ -35,7 +33,7 @@
 
 
 def detect_format(fileobj):
-    """ Returns the StreamlinesFile object guessed from the file-like object.
+    """Returns the StreamlinesFile object guessed from the file-like object.
 
     Parameters
     ----------
@@ -64,7 +62,7 @@
 
 
 def load(fileobj, lazy_load=False):
-    """ Loads streamlines in *RAS+* and *mm* space from a file-like object.
+    """Loads streamlines in *RAS+* and *mm* space from a file-like object.
 
     Parameters
     ----------
@@ -96,7 +94,7 @@
 
 
 def save(tractogram, filename, **kwargs):
-    r""" Saves a tractogram to a file.
+    r"""Saves a tractogram to a file.
 
     Parameters
     ----------
@@ -123,15 +121,15 @@
     else:  # Assume it's a TractogramFile object.
         tractogram_file = tractogram
 
-        if (tractogram_file_class is None or
-                not isinstance(tractogram_file, tractogram_file_class)):
-            msg = ("The extension you specified is unusual for the provided"
-                   " 'TractogramFile' object.")
+        if tractogram_file_class is None or not isinstance(tractogram_file, tractogram_file_class):
+            msg = (
+                'The extension you specified is unusual for the provided'
+                " 'TractogramFile' object."
+            )
             warnings.warn(msg, ExtensionWarning)
 
         if kwargs:
-            msg = ("A 'TractogramFile' object was provided, no need for"
-                   " keyword arguments.")
+            msg = "A 'TractogramFile' object was provided, no need for" ' keyword arguments.'
             raise ValueError(msg)
 
     tractogram_file.save(filename)
diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py
index cff930aaee..bb03e6bfd0 100644
--- a/nibabel/streamlines/array_sequence.py
+++ b/nibabel/streamlines/array_sequence.py
@@ -1,4 +1,3 @@
-
 import numbers
 from operator import mul
 from functools import reduce
@@ -9,7 +8,7 @@
 
 
 def is_array_sequence(obj):
-    """ Return True if `obj` is an array sequence. """
+    """Return True if `obj` is an array sequence."""
     try:
         return obj.is_array_sequence
     except AttributeError:
@@ -17,9 +16,9 @@
 
 
 def is_ndarray_of_int_or_bool(obj):
-    return (isinstance(obj, np.ndarray) and
-            (np.issubdtype(obj.dtype, np.integer) or
-             np.issubdtype(obj.dtype, np.bool_)))
+    return isinstance(obj, np.ndarray) and (
+        np.issubdtype(obj.dtype, np.integer) or np.issubdtype(obj.dtype, np.bool_)
+    )
 
 
 class _BuildCache:
@@ -31,8 +30,7 @@ def __init__(self, arr_seq, common_shape, dtype):
         # Use the passed dtype only if null data array
         self.dtype = dtype if arr_seq._data.size == 0 else arr_seq._data.dtype
         if arr_seq.common_shape != () and common_shape != arr_seq.common_shape:
-            raise ValueError(
-                "All dimensions, except the first one, must match exactly")
+            raise ValueError('All dimensions, except the first one, must match exactly')
         self.common_shape = common_shape
         n_in_row = reduce(mul, common_shape, 1)
         bytes_per_row = n_in_row * dtype.itemsize
@@ -44,25 +42,29 @@ def update_seq(self, arr_seq):
 
 
 def _define_operators(cls):
-    """ Decorator which adds support for some Python operators.
""" - def _wrap(cls, op, inplace=False, unary=False): + """Decorator which adds support for some Python operators.""" + def _wrap(cls, op, inplace=False, unary=False): def fn_unary_op(self): try: return self._op(op) except SystemError as e: - message = ("Numpy returned an uninformative error. It possibly should be " - "'Integers to negative integer powers are not allowed.' " - "See https://github.com/numpy/numpy/issues/19634 for details.") + message = ( + 'Numpy returned an uninformative error. It possibly should be ' + "'Integers to negative integer powers are not allowed.' " + 'See https://github.com/numpy/numpy/issues/19634 for details.' + ) raise ValueError(message) from e def fn_binary_op(self, value): try: return self._op(op, value, inplace=inplace) except SystemError as e: - message = ("Numpy returned an uninformative error. It possibly should be " - "'Integers to negative integer powers are not allowed.' " - "See https://github.com/numpy/numpy/issues/19634 for details.") + message = ( + 'Numpy returned an uninformative error. It possibly should be ' + "'Integers to negative integer powers are not allowed.' " + 'See https://github.com/numpy/numpy/issues/19634 for details.' + ) raise ValueError(message) from e setattr(cls, op, fn_unary_op if unary else fn_binary_op) @@ -70,16 +72,27 @@ def fn_binary_op(self, value): fn.__name__ = op fn.__doc__ = getattr(np.ndarray, op).__doc__ - for op in ["__add__", "__sub__", "__mul__", "__mod__", "__pow__", - "__floordiv__", "__truediv__", "__lshift__", "__rshift__", - "__or__", "__and__", "__xor__"]: + for op in [ + '__add__', + '__sub__', + '__mul__', + '__mod__', + '__pow__', + '__floordiv__', + '__truediv__', + '__lshift__', + '__rshift__', + '__or__', + '__and__', + '__xor__', + ]: _wrap(cls, op=op, inplace=False) _wrap(cls, op=f"__i{op.strip('_')}__", inplace=True) - for op in ["__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__"]: + for op in ['__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__']: _wrap(cls, op) - for op in ["__neg__", "__abs__", "__invert__"]: + for op in ['__neg__', '__abs__', '__invert__']: _wrap(cls, op, unary=True) return cls @@ -87,7 +100,7 @@ def fn_binary_op(self, value): @_define_operators class ArraySequence: - """ Sequence of ndarrays having variable first dimension sizes. + """Sequence of ndarrays having variable first dimension sizes. This is a container that can store multiple ndarrays where each ndarray might have a different first dimension size but a *common* size for the @@ -100,7 +113,7 @@ class ArraySequence: """ def __init__(self, iterable=None, buffer_size=4): - """ Initialize array sequence instance + """Initialize array sequence instance Parameters ---------- @@ -144,16 +157,16 @@ def is_array_sequence(self): @property def common_shape(self): - """ Matching shape of the elements in this array sequence. """ + """Matching shape of the elements in this array sequence.""" return self._data.shape[1:] @property def total_nb_rows(self): - """ Total number of rows in this array sequence. """ + """Total number of rows in this array sequence.""" return np.sum(self._lengths) def get_data(self): - """ Returns a *copy* of the elements in this array sequence. + """Returns a *copy* of the elements in this array sequence. Notes ----- @@ -164,31 +177,31 @@ def get_data(self): return self.copy()._data def _check_shape(self, arrseq): - """ Check whether this array sequence is compatible with another. 
""" - msg = "cannot perform operation - array sequences have different" + """Check whether this array sequence is compatible with another.""" + msg = 'cannot perform operation - array sequences have different' if len(self._lengths) != len(arrseq._lengths): - msg += f" lengths: {len(self._lengths)} vs. {len(arrseq._lengths)}." + msg += f' lengths: {len(self._lengths)} vs. {len(arrseq._lengths)}.' raise ValueError(msg) if self.total_nb_rows != arrseq.total_nb_rows: - msg += f" amount of data: {self.total_nb_rows} vs. {arrseq.total_nb_rows}." + msg += f' amount of data: {self.total_nb_rows} vs. {arrseq.total_nb_rows}.' raise ValueError(msg) if self.common_shape != arrseq.common_shape: - msg += f" common shape: {self.common_shape} vs. {arrseq.common_shape}." + msg += f' common shape: {self.common_shape} vs. {arrseq.common_shape}.' raise ValueError(msg) return True def _get_next_offset(self): - """ Offset in ``self._data`` at which to write next rowelement """ + """Offset in ``self._data`` at which to write next rowelement""" if len(self._offsets) == 0: return 0 imax = np.argmax(self._offsets) return self._offsets[imax] + self._lengths[imax] def append(self, element, cache_build=False): - """ Appends `element` to this array sequence. + """Appends `element` to this array sequence. Append can be a lot faster if it knows that it is appending several elements instead of a single element. In that case it can cache the @@ -242,7 +255,7 @@ def append(self, element, cache_build=False): build_cache.update_seq(self) def finalize_append(self): - """ Finalize process of appending several elements to `self` + """Finalize process of appending several elements to `self` :meth:`append` can be a lot faster if it knows that it is appending several elements instead of a single element. To tell the append @@ -257,7 +270,7 @@ def finalize_append(self): self.shrink_data() def _resize_data_to(self, n_rows, build_cache): - """ Resize data array if required """ + """Resize data array if required""" # Calculate new data shape, rounding up to nearest buffer size n_bufs = np.ceil(n_rows / build_cache.rows_per_buf) extended_n_rows = int(n_bufs * build_cache.rows_per_buf) @@ -272,11 +285,10 @@ def _resize_data_to(self, n_rows, build_cache): self._data.resize(new_shape, refcheck=False) def shrink_data(self): - self._data.resize((self._get_next_offset(),) + self.common_shape, - refcheck=False) + self._data.resize((self._get_next_offset(),) + self.common_shape, refcheck=False) def extend(self, elements): - """ Appends all `elements` to this array sequence. + """Appends all `elements` to this array sequence. Parameters ---------- @@ -307,8 +319,7 @@ def extend(self, elements): e0 = np.asarray(elements[0]) n_elements = np.sum([len(e) for e in elements]) self._build_cache = _BuildCache(self, e0.shape[1:], e0.dtype) - self._resize_data_to(self._get_next_offset() + n_elements, - self._build_cache) + self._resize_data_to(self._get_next_offset() + n_elements, self._build_cache) for e in elements: self.append(e, cache_build=True) @@ -316,7 +327,7 @@ def extend(self, elements): self.finalize_append() def copy(self): - """ Creates a copy of this :class:`ArraySequence` object. + """Creates a copy of this :class:`ArraySequence` object. 
Returns ------- @@ -331,15 +342,14 @@ def copy(self): """ seq = self.__class__() total_lengths = np.sum(self._lengths) - seq._data = np.empty((total_lengths,) + self._data.shape[1:], - dtype=self._data.dtype) + seq._data = np.empty((total_lengths,) + self._data.shape[1:], dtype=self._data.dtype) next_offset = 0 offsets = [] for offset, length in zip(self._offsets, self._lengths): offsets.append(next_offset) - chunk = self._data[offset:offset + length] - seq._data[next_offset:next_offset + length] = chunk + chunk = self._data[offset : offset + length] + seq._data[next_offset : next_offset + length] = chunk next_offset += length seq._offsets = np.asarray(offsets) @@ -348,7 +358,7 @@ def copy(self): return seq def __getitem__(self, idx): - """ Get sequence(s) through standard or advanced numpy indexing. + """Get sequence(s) through standard or advanced numpy indexing. Parameters ---------- @@ -368,7 +378,7 @@ def __getitem__(self, idx): """ if isinstance(idx, (numbers.Integral, np.integer)): start = self._offsets[idx] - return self._data[start:start + self._lengths[idx]] + return self._data[start : start + self._lengths[idx]] seq = self.__class__() seq._is_view = True @@ -390,11 +400,13 @@ def __getitem__(self, idx): seq._lengths = self._lengths[off_idx] return seq - raise TypeError("Index must be either an int, a slice, a list of int" - " or a ndarray of bool! Not " + str(type(idx))) + raise TypeError( + 'Index must be either an int, a slice, a list of int' + ' or a ndarray of bool! Not ' + str(type(idx)) + ) def __setitem__(self, idx, elements): - """ Set sequence(s) through standard or advanced numpy indexing. + """Set sequence(s) through standard or advanced numpy indexing. Parameters ---------- @@ -411,7 +423,7 @@ def __setitem__(self, idx, elements): """ if isinstance(idx, (numbers.Integral, np.integer)): start = self._offsets[idx] - self._data[start:start + self._lengths[idx]] = elements + self._data[start : start + self._lengths[idx]] = elements return if isinstance(idx, tuple): @@ -431,31 +443,33 @@ def __setitem__(self, idx, elements): lengths = self._lengths[off_idx] else: - raise TypeError("Index must be either an int, a slice, a list of int" - " or a ndarray of bool! Not " + str(type(idx))) + raise TypeError( + 'Index must be either an int, a slice, a list of int' + ' or a ndarray of bool! Not ' + str(type(idx)) + ) if is_array_sequence(elements): if len(lengths) != len(elements): - msg = f"Trying to set {len(lengths)} sequences with {len(elements)} sequences." + msg = f'Trying to set {len(lengths)} sequences with {len(elements)} sequences.' raise ValueError(msg) if sum(lengths) != elements.total_nb_rows: - msg = f"Trying to set {sum(lengths)} points with {elements.total_nb_rows} points." + msg = f'Trying to set {sum(lengths)} points with {elements.total_nb_rows} points.' raise ValueError(msg) for o1, l1, o2, l2 in zip(offsets, lengths, elements._offsets, elements._lengths): - data[o1:o1 + l1] = elements._data[o2:o2 + l2] + data[o1 : o1 + l1] = elements._data[o2 : o2 + l2] elif isinstance(elements, numbers.Number): for o1, l1 in zip(offsets, lengths): - data[o1:o1 + l1] = elements + data[o1 : o1 + l1] = elements else: # Try to iterate over it. for o1, l1, element in zip(offsets, lengths, elements): - data[o1:o1 + l1] = element + data[o1 : o1 + l1] = element def _op(self, op, value=None, inplace=False): - """ Applies some operator to this arraysequence. + """Applies some operator to this arraysequence. 
This handles both unary and binary operators with a scalar or another array sequence. Operations are performed directly on the underlying @@ -475,18 +489,25 @@ def _op(self, op, value=None, inplace=False): seq = self if inplace else self.copy() if is_array_sequence(value) and seq._check_shape(value): - elements = zip(seq._offsets, seq._lengths, - self._offsets, self._lengths, - value._offsets, value._lengths) + elements = zip( + seq._offsets, + seq._lengths, + self._offsets, + self._lengths, + value._offsets, + value._lengths, + ) # Change seq.dtype to match the operation resulting type. o0, l0, o1, l1, o2, l2 = next(elements) - tmp = getattr(self._data[o1:o1 + l1], op)(value._data[o2:o2 + l2]) + tmp = getattr(self._data[o1 : o1 + l1], op)(value._data[o2 : o2 + l2]) seq._data = seq._data.astype(tmp.dtype) - seq._data[o0:o0 + l0] = tmp + seq._data[o0 : o0 + l0] = tmp for o0, l0, o1, l1, o2, l2 in elements: - seq._data[o0:o0 + l0] = getattr(self._data[o1:o1 + l1], op)(value._data[o2:o2 + l2]) + seq._data[o0 : o0 + l0] = getattr(self._data[o1 : o1 + l1], op)( + value._data[o2 : o2 + l2] + ) else: args = [] if value is None else [value] # Dealing with unary and binary ops. @@ -494,22 +515,23 @@ def _op(self, op, value=None, inplace=False): # Change seq.dtype to match the operation resulting type. o0, l0, o1, l1 = next(elements) - tmp = getattr(self._data[o1:o1 + l1], op)(*args) + tmp = getattr(self._data[o1 : o1 + l1], op)(*args) seq._data = seq._data.astype(tmp.dtype) - seq._data[o0:o0 + l0] = tmp + seq._data[o0 : o0 + l0] = tmp for o0, l0, o1, l1 in elements: - seq._data[o0:o0 + l0] = getattr(self._data[o1:o1 + l1], op)(*args) + seq._data[o0 : o0 + l0] = getattr(self._data[o1 : o1 + l1], op)(*args) return seq def __iter__(self): if len(self._lengths) != len(self._offsets): - raise ValueError("ArraySequence object corrupted:" - " len(self._lengths) != len(self._offsets)") + raise ValueError( + 'ArraySequence object corrupted:' ' len(self._lengths) != len(self._offsets)' + ) for offset, lengths in zip(self._offsets, self._lengths): - yield self._data[offset: offset + lengths] + yield self._data[offset : offset + lengths] def __len__(self): return len(self._offsets) @@ -519,33 +541,30 @@ def __repr__(self): # Show only the first and last edgeitems. edgeitems = np.get_printoptions()['edgeitems'] data = str(list(self[:edgeitems]))[:-1] - data += ", ..., " + data += ', ..., ' data += str(list(self[-edgeitems:]))[1:] else: data = str(list(self)) - return f"{self.__class__.__name__}({data})" + return f'{self.__class__.__name__}({data})' def save(self, filename): - """ Saves this :class:`ArraySequence` object to a .npz file. """ - np.savez(filename, - data=self._data, - offsets=self._offsets, - lengths=self._lengths) + """Saves this :class:`ArraySequence` object to a .npz file.""" + np.savez(filename, data=self._data, offsets=self._offsets, lengths=self._lengths) @classmethod def load(cls, filename): - """ Loads a :class:`ArraySequence` object from a .npz file. 
""" + """Loads a :class:`ArraySequence` object from a .npz file.""" content = np.load(filename) seq = cls() - seq._data = content["data"] - seq._offsets = content["offsets"] - seq._lengths = content["lengths"] + seq._data = content['data'] + seq._offsets = content['offsets'] + seq._lengths = content['lengths'] return seq def create_arraysequences_from_generator(gen, n, buffer_sizes=None): - """ Creates :class:`ArraySequence` objects from a generator yielding tuples + """Creates :class:`ArraySequence` objects from a generator yielding tuples Parameters ---------- @@ -572,7 +591,7 @@ def create_arraysequences_from_generator(gen, n, buffer_sizes=None): def concatenate(seqs, axis): - """ Concatenates multiple :class:`ArraySequence` objects along an axis. + """Concatenates multiple :class:`ArraySequence` objects along an axis. Parameters ---------- diff --git a/nibabel/streamlines/header.py b/nibabel/streamlines/header.py index 523035f3ee..2aed10c62c 100644 --- a/nibabel/streamlines/header.py +++ b/nibabel/streamlines/header.py @@ -1,22 +1,23 @@ -""" Field class defining common header fields in tractogram files +"""Field class defining common header fields in tractogram files """ class Field: - """ Header fields common to multiple streamline file formats. + """Header fields common to multiple streamline file formats. In IPython, use `nibabel.streamlines.Field??` to list them. """ - NB_STREAMLINES = "nb_streamlines" - STEP_SIZE = "step_size" - METHOD = "method" - NB_SCALARS_PER_POINT = "nb_scalars_per_point" - NB_PROPERTIES_PER_STREAMLINE = "nb_properties_per_streamline" - NB_POINTS = "nb_points" - VOXEL_SIZES = "voxel_sizes" - DIMENSIONS = "dimensions" - MAGIC_NUMBER = "magic_number" - ORIGIN = "origin" - VOXEL_TO_RASMM = "voxel_to_rasmm" - VOXEL_ORDER = "voxel_order" - ENDIANNESS = "endianness" + + NB_STREAMLINES = 'nb_streamlines' + STEP_SIZE = 'step_size' + METHOD = 'method' + NB_SCALARS_PER_POINT = 'nb_scalars_per_point' + NB_PROPERTIES_PER_STREAMLINE = 'nb_properties_per_streamline' + NB_POINTS = 'nb_points' + VOXEL_SIZES = 'voxel_sizes' + DIMENSIONS = 'dimensions' + MAGIC_NUMBER = 'magic_number' + ORIGIN = 'origin' + VOXEL_TO_RASMM = 'voxel_to_rasmm' + VOXEL_ORDER = 'voxel_order' + ENDIANNESS = 'endianness' diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 37bdbe3ffb..7fb5cde8b3 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -1,4 +1,4 @@ -""" Read / write access to TCK streamlines format. +"""Read / write access to TCK streamlines format. TCK format is defined at http://mrtrix.readthedocs.io/en/latest/getting_started/image_data.html?highlight=format#tracks-file-format-tck @@ -23,7 +23,7 @@ class TckFile(TractogramFile): - """ Convenience class to encapsulate TCK file format. + """Convenience class to encapsulate TCK file format. Notes ----- @@ -42,8 +42,9 @@ class TckFile(TractogramFile): .. [#] http://www.nitrc.org/pipermail/mrtrix-discussion/2014-January/000859.html .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ + # Constants - MAGIC_NUMBER = b"mrtrix tracks" + MAGIC_NUMBER = b'mrtrix tracks' SUPPORTS_DATA_PER_POINT = False # Not yet SUPPORTS_DATA_PER_STREAMLINE = False # Not yet @@ -73,7 +74,7 @@ def __init__(self, tractogram, header=None): @classmethod def is_correct_format(cls, fileobj): - """ Check if the file is in TCK format. + """Check if the file is in TCK format. 
Parameters ---------- @@ -97,18 +98,18 @@ def is_correct_format(cls, fileobj): @classmethod def create_empty_header(cls): - """ Return an empty compliant TCK header as dict """ + """Return an empty compliant TCK header as dict""" header = {} # Default values header[Field.MAGIC_NUMBER] = cls.MAGIC_NUMBER header[Field.NB_STREAMLINES] = 0 - header['datatype'] = "Float32LE" + header['datatype'] = 'Float32LE' return header @classmethod def load(cls, fileobj, lazy_load=False): - """ Loads streamlines from a filename or file-like object. + """Loads streamlines from a filename or file-like object. Parameters ---------- @@ -139,6 +140,7 @@ def load(cls, fileobj, lazy_load=False): hdr = cls._read_header(fileobj) if lazy_load: + def _read(): for pts in cls._read(fileobj, hdr): yield TractogramItem(pts, {}, {}) @@ -162,7 +164,7 @@ def _finalize_header(self, f, header, offset=0): self._write_header(f, header) def save(self, fileobj): - """ Save tractogram to a filename or file-like object using TCK format. + """Save tractogram to a filename or file-like object using TCK format. Parameters ---------- @@ -181,7 +183,7 @@ def save(self, fileobj): # Keep counts for correcting incoherent fields or warn. nb_streamlines = 0 - with Opener(fileobj, mode="wb") as f: + with Opener(fileobj, mode='wb') as f: # Keep track of the beginning of the header. beginning = f.tell() @@ -209,16 +211,20 @@ def save(self, fileobj): data_for_streamline = first_item.data_for_streamline if len(data_for_streamline) > 0: - keys = ", ".join(data_for_streamline.keys()) - msg = ("TCK format does not support saving additional " - f"data alongside streamlines. Dropping: {keys}") + keys = ', '.join(data_for_streamline.keys()) + msg = ( + 'TCK format does not support saving additional ' + f'data alongside streamlines. Dropping: {keys}' + ) warnings.warn(msg, DataWarning) data_for_points = first_item.data_for_points if len(data_for_points) > 0: - keys = ", ".join(data_for_points.keys()) - msg = ("TCK format does not support saving additional " - f"data alongside points. Dropping: {keys}") + keys = ', '.join(data_for_points.keys()) + msg = ( + 'TCK format does not support saving additional ' + f'data alongside points. Dropping: {keys}' + ) warnings.warn(msg, DataWarning) for t in tractogram: @@ -234,7 +240,7 @@ def save(self, fileobj): @staticmethod def _write_header(fileobj, header): - """ Write TCK header to file-like object. + """Write TCK header to file-like object. Parameters ---------- @@ -243,32 +249,36 @@ def _write_header(fileobj, header): ready to read from the beginning of the TCK header). """ # Fields to exclude - exclude = [Field.MAGIC_NUMBER, # Handled separately. - Field.NB_STREAMLINES, # Handled separately. - Field.ENDIANNESS, # Handled separately. - Field.VOXEL_TO_RASMM, # Streamlines are always in RAS+ mm. - "count", "datatype", "file"] # Fields being replaced. + exclude = [ + Field.MAGIC_NUMBER, # Handled separately. + Field.NB_STREAMLINES, # Handled separately. + Field.ENDIANNESS, # Handled separately. + Field.VOXEL_TO_RASMM, # Streamlines are always in RAS+ mm. + 'count', + 'datatype', + 'file', + ] # Fields being replaced. lines = [ - f"count: {header[Field.NB_STREAMLINES]:010}", - "datatype: Float32LE", # Always Float32LE. + f'count: {header[Field.NB_STREAMLINES]:010}', + 'datatype: Float32LE', # Always Float32LE. 
]
-        lines.extend(f"{k}: {v}"
-                     for k, v in header.items()
-                     if k not in exclude and not k.startswith("_"))
-        out = "\n".join(lines)
+        lines.extend(
+            f'{k}: {v}' for k, v in header.items() if k not in exclude and not k.startswith('_')
+        )
+        out = '\n'.join(lines)
 
         # Check the header is well formatted.
-        if out.count("\n") > len(lines) - 1:  # \n only allowed between lines.
+        if out.count('\n') > len(lines) - 1:  # \n only allowed between lines.
             msg = f"Key-value pairs cannot contain '\\n':\n{out}"
             raise HeaderError(msg)
 
-        if out.count(":") > len(lines):
+        if out.count(':') > len(lines):
             # : only one per line (except the last one which contains END).
             msg = f"Key-value pairs cannot contain ':':\n{out}"
             raise HeaderError(msg)
 
-        out = header[Field.MAGIC_NUMBER] + b"\n" + out.encode('utf-8')
+        out = header[Field.MAGIC_NUMBER] + b'\n' + out.encode('utf-8')
 
         # Compute data offset considering the offset string representation
         # headers + "file" header + END + \n's
@@ -284,7 +294,7 @@ def _write_header(fileobj, header):
 
     @classmethod
     def _read_header(cls, fileobj):
-        """ Reads a TCK header from a file.
+        """Reads a TCK header from a file.
 
         Parameters
         ----------
@@ -316,7 +326,7 @@ def _read_header(cls, fileobj):
             magic_number = f.read(len(cls.MAGIC_NUMBER))
 
         if magic_number != cls.MAGIC_NUMBER:
-            raise HeaderError(f"Invalid magic number: {magic_number}")
+            raise HeaderError(f'Invalid magic number: {magic_number}')
 
         hdr[Field.MAGIC_NUMBER] = magic_number
 
@@ -331,18 +341,18 @@ def _read_header(cls, fileobj):
             if not line:  # Skip empty lines
                 continue
 
-            if line == "END":  # End of the header
+            if line == 'END':  # End of the header
                 found_end = True
                 break
 
             if ':' not in line:  # Invalid header line
-                raise HeaderError(f"Invalid header (line {n_line}): {line}")
+                raise HeaderError(f'Invalid header (line {n_line}): {line}')
 
-            key, value = line.split(":", 1)
+            key, value = line.split(':', 1)
             hdr[key.strip()] = value.strip()
 
         if not found_end:
-            raise HeaderError("Missing END in the header.")
+            raise HeaderError('Missing END in the header.')
 
         offset_data = f.tell()
 
@@ -352,14 +362,15 @@ def _read_header(cls, fileobj):
 
         # Check integrity of TCK header.
         if 'datatype' not in hdr:
-            msg = ("Missing 'datatype' attribute in TCK header."
-                   " Assuming it is Float32LE.")
+            msg = "Missing 'datatype' attribute in TCK header." ' Assuming it is Float32LE.'
             warnings.warn(msg, HeaderWarning)
-            hdr['datatype'] = "Float32LE"
+            hdr['datatype'] = 'Float32LE'
 
         if not hdr['datatype'].startswith('Float32'):
-            msg = ("TCK only supports float32 dtype but 'datatype: "
-                   f"{hdr['datatype']}' was specified in the header.")
+            msg = (
+                "TCK only supports float32 dtype but 'datatype: "
+                f"{hdr['datatype']}' was specified in the header."
+            )
             raise HeaderError(msg)
 
         if 'file' not in hdr:
@@ -368,8 +379,10 @@ def _read_header(cls, fileobj):
             hdr['file'] = f'. {offset_data}'
 
         if hdr['file'].split()[0] != '.':
-            msg = ("TCK only supports single-file - in other words the filename part must be "
-                   f"specified as '.' but '{hdr['file'].split()[0]}' was specified.")
+            msg = (
+                'TCK only supports single-file - in other words the filename part must be '
+                f"specified as '.' but '{hdr['file'].split()[0]}' was specified."
+            )
             raise HeaderError(msg)
 
         # Set endianness and _dtype attributes in the header.
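For reference, a minimal header that this reader accepts looks roughly like the text below (a sketch, not taken from the test data; the byte offset after ``file: .`` was counted by hand for this exact text and is otherwise illustrative). Key-value lines run up to ``END``, and the ``file`` entry records where the little-endian float32 triplets begin.

    mrtrix tracks
    count: 0000000000
    datatype: Float32LE
    file: . 67
    END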
@@ -384,7 +397,7 @@ def _read_header(cls, fileobj): @classmethod def _read(cls, fileobj, header, buffer_size=4): - """ Return generator that reads TCK data from `fileobj` given `header` + """Return generator that reads TCK data from `fileobj` given `header` Parameters ---------- @@ -403,7 +416,7 @@ def _read(cls, fileobj, header, buffer_size=4): points : ndarray of shape (n_pts, 3) Streamline points """ - dtype = header["_dtype"] + dtype = header['_dtype'] coordinate_size = 3 * dtype.itemsize # Make buffer_size an integer and a multiple of coordinate_size. buffer_size = int(buffer_size * MEGABYTE) @@ -413,7 +426,7 @@ def _read(cls, fileobj, header, buffer_size=4): start_position = f.tell() # Set the file position at the beginning of the data. - f.seek(header["_offset_data"], os.SEEK_SET) + f.seek(header['_offset_data'], os.SEEK_SET) eof = False leftover = np.empty((0, 3), dtype='=' hdr_size = trk_struct['hdr_size'] @@ -197,8 +208,7 @@ def test_load_complex_file_in_big_endian(self): assert hdr_size == 1000 for lazy_load in [False, True]: - trk = TrkFile.load(DATA['complex_trk_big_endian_fname'], - lazy_load=lazy_load) + trk = TrkFile.load(DATA['complex_trk_big_endian_fname'], lazy_load=lazy_load) with pytest.warns(Warning) if lazy_load else error_warnings(): assert_tractogram_equal(trk.tractogram, DATA['complex_tractogram']) @@ -225,8 +235,7 @@ def test_write_empty_file(self): assert trk_file.read() == open(DATA['empty_trk_fname'], 'rb').read() def test_write_simple_file(self): - tractogram = Tractogram(DATA['streamlines'], - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) trk_file = BytesIO() trk = TrkFile(tractogram) @@ -244,9 +253,9 @@ def test_write_simple_file(self): def test_write_complex_file(self): # With scalars - tractogram = Tractogram(DATA['streamlines'], - data_per_point=DATA['data_per_point'], - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], data_per_point=DATA['data_per_point'], affine_to_rasmm=np.eye(4) + ) trk_file = BytesIO() trk = TrkFile(tractogram) @@ -258,9 +267,9 @@ def test_write_complex_file(self): # With properties data_per_streamline = DATA['data_per_streamline'] - tractogram = Tractogram(DATA['streamlines'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], data_per_streamline=data_per_streamline, affine_to_rasmm=np.eye(4) + ) trk = TrkFile(tractogram) trk_file = BytesIO() @@ -272,10 +281,12 @@ def test_write_complex_file(self): # With scalars and properties data_per_streamline = DATA['data_per_streamline'] - tractogram = Tractogram(DATA['streamlines'], - data_per_point=DATA['data_per_point'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], + data_per_point=DATA['data_per_point'], + data_per_streamline=data_per_streamline, + affine_to_rasmm=np.eye(4), + ) trk_file = BytesIO() trk = TrkFile(tractogram) @@ -292,9 +303,11 @@ def test_write_complex_file(self): assert trk_file.read() == open(DATA['complex_trk_fname'], 'rb').read() def test_load_write_file(self): - for fname in [DATA['empty_trk_fname'], - DATA['simple_trk_fname'], - DATA['complex_trk_fname']]: + for fname in [ + DATA['empty_trk_fname'], + DATA['simple_trk_fname'], + DATA['complex_trk_fname'], + ]: for lazy_load in [False, True]: trk = TrkFile.load(fname, lazy_load=lazy_load) trk_file = BytesIO() @@ -332,7 +345,7 @@ def test_load_write_LPS_file(self): # For TRK file format, the 
default voxel order is LPS.
         header = copy.deepcopy(trk_LPS.header)
-        header[Field.VOXEL_ORDER] = b""
+        header[Field.VOXEL_ORDER] = b''
 
         trk = TrkFile(trk_LPS.tractogram, header)
         trk.save(trk_file)
@@ -361,7 +374,7 @@ def test_write_optional_header_fields(self):
         trk_file.seek(0, os.SEEK_SET)
 
         new_trk = TrkFile.load(trk_file)
-        assert "extra" not in new_trk.header
+        assert 'extra' not in new_trk.header
 
     def test_write_too_many_scalars_and_properties(self):
         # TRK supports up to 10 data_per_point.
         for i in range(10):
             data_per_point[f'#{i}'] = DATA['fa']
 
-        tractogram = Tractogram(DATA['streamlines'],
-                                data_per_point=data_per_point,
-                                affine_to_rasmm=np.eye(4))
+        tractogram = Tractogram(
+            DATA['streamlines'], data_per_point=data_per_point, affine_to_rasmm=np.eye(4)
+        )
 
         trk_file = BytesIO()
         trk = TrkFile(tractogram)
@@ -384,9 +397,9 @@ def test_write_too_many_scalars_and_properties(self):
         # More than 10 data_per_point should raise an error.
         data_per_point[f'#{i + 1}'] = DATA['fa']
 
-        tractogram = Tractogram(DATA['streamlines'],
-                                data_per_point=data_per_point,
-                                affine_to_rasmm=np.eye(4))
+        tractogram = Tractogram(
+            DATA['streamlines'], data_per_point=data_per_point, affine_to_rasmm=np.eye(4)
+        )
 
         trk = TrkFile(tractogram)
         with pytest.raises(ValueError):
@@ -397,9 +410,11 @@ def test_write_too_many_scalars_and_properties(self):
         for i in range(10):
             data_per_streamline[f'#{i}'] = DATA['mean_torsion']
 
-        tractogram = Tractogram(DATA['streamlines'],
-                                data_per_streamline=data_per_streamline,
-                                affine_to_rasmm=np.eye(4))
+        tractogram = Tractogram(
+            DATA['streamlines'],
+            data_per_streamline=data_per_streamline,
+            affine_to_rasmm=np.eye(4),
+        )
 
         trk_file = BytesIO()
         trk = TrkFile(tractogram)
@@ -412,8 +427,7 @@ def test_write_too_many_scalars_and_properties(self):
         # More than 10 data_per_streamline should raise an error.
         data_per_streamline[f'#{i + 1}'] = DATA['mean_torsion']
 
-        tractogram = Tractogram(DATA['streamlines'],
-                                data_per_streamline=data_per_streamline)
+        tractogram = Tractogram(DATA['streamlines'], data_per_streamline=data_per_streamline)
 
         trk = TrkFile(tractogram)
         with pytest.raises(ValueError):
@@ -426,10 +440,10 @@ def test_write_scalars_and_properties_name_too_long(self):
         # TRK supports data_per_point name up to 20 characters.
         # However, we reserve the last two characters to store
         # the number of values associated with each data_per_point.
        # So in reality we allow names of 18 characters, otherwise
        # the name is truncated and a warning is issued.
        for nb_chars in range(22):
-            data_per_point = {'A'*nb_chars: DATA['colors']}
-            tractogram = Tractogram(DATA['streamlines'],
-                                    data_per_point=data_per_point,
-                                    affine_to_rasmm=np.eye(4))
+            data_per_point = {'A' * nb_chars: DATA['colors']}
+            tractogram = Tractogram(
+                DATA['streamlines'], data_per_point=data_per_point, affine_to_rasmm=np.eye(4)
+            )
 
             trk = TrkFile(tractogram)
             if nb_chars > 18:
             else:
                 trk.save(BytesIO())
 
-            data_per_point = {'A'*nb_chars: DATA['fa']}
-            tractogram = Tractogram(DATA['streamlines'],
-                                    data_per_point=data_per_point,
-                                    affine_to_rasmm=np.eye(4))
+            data_per_point = {'A' * nb_chars: DATA['fa']}
+            tractogram = Tractogram(
+                DATA['streamlines'], data_per_point=data_per_point, affine_to_rasmm=np.eye(4)
+            )
 
             trk = TrkFile(tractogram)
             if nb_chars > 20:
@@ -456,10 +470,12 @@ def test_write_scalars_and_properties_name_too_long(self):
         # So in reality we allow names of 18 characters, otherwise
         # the name is truncated and a warning is issued.
for nb_chars in range(22): - data_per_streamline = {'A'*nb_chars: DATA['mean_colors']} - tractogram = Tractogram(DATA['streamlines'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + data_per_streamline = {'A' * nb_chars: DATA['mean_colors']} + tractogram = Tractogram( + DATA['streamlines'], + data_per_streamline=data_per_streamline, + affine_to_rasmm=np.eye(4), + ) trk = TrkFile(tractogram) if nb_chars > 18: @@ -468,10 +484,12 @@ def test_write_scalars_and_properties_name_too_long(self): else: trk.save(BytesIO()) - data_per_streamline = {'A'*nb_chars: DATA['mean_torsion']} - tractogram = Tractogram(DATA['streamlines'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + data_per_streamline = {'A' * nb_chars: DATA['mean_torsion']} + tractogram = Tractogram( + DATA['streamlines'], + data_per_streamline=data_per_streamline, + affine_to_rasmm=np.eye(4), + ) trk = TrkFile(tractogram) if nb_chars > 20: diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index 5b67af1ab3..cf9a099fe4 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -10,17 +10,17 @@ def is_data_dict(obj): - """ True if `obj` seems to implement the :class:`DataDict` API """ + """True if `obj` seems to implement the :class:`DataDict` API""" return hasattr(obj, 'store') def is_lazy_dict(obj): - """ True if `obj` seems to implement the :class:`LazyDict` API """ + """True if `obj` seems to implement the :class:`LazyDict` API""" return is_data_dict(obj) and callable(list(obj.store.values())[0]) class SliceableDataDict(MutableMapping): - r""" Dictionary for which key access can do slicing on the values. + r"""Dictionary for which key access can do slicing on the values. This container behaves like a standard dictionary but extends key access to allow keys for key access to be indices slicing into the contained ndarray @@ -33,6 +33,7 @@ class SliceableDataDict(MutableMapping): Positional and keyword arguments, passed straight through the ``dict`` constructor. """ + def __init__(self, *args, **kwargs): self.store = dict() self.update(dict(*args, **kwargs)) @@ -73,7 +74,7 @@ def __len__(self): class PerArrayDict(SliceableDataDict): - r""" Dictionary for which key access can do slicing on the values. + r"""Dictionary for which key access can do slicing on the values. This container behaves like a standard dictionary but extends key access to allow keys for key access to be indices slicing into the contained ndarray @@ -93,6 +94,7 @@ class PerArrayDict(SliceableDataDict): Positional and keyword arguments, passed straight through the ``dict`` constructor. """ + def __init__(self, n_rows=0, *args, **kwargs): self.n_rows = n_rows super(PerArrayDict, self).__init__(*args, **kwargs) @@ -102,24 +104,24 @@ def __setitem__(self, key, value): if value.ndim == 1 and value.dtype != object: # Reshape without copy - value.shape = ((len(value), 1)) + value.shape = (len(value), 1) if value.ndim != 2: - raise ValueError("data_per_streamline must be a 2D array.") + raise ValueError('data_per_streamline must be a 2D array.') # We make sure there is the right amount of values if 0 < self.n_rows != len(value): - msg = f"The number of values ({len(value)}) should match n_elements ({self.n_rows})." + msg = f'The number of values ({len(value)}) should match n_elements ({self.n_rows}).' raise ValueError(msg) self.store[key] = value def _extend_entry(self, key, value): - """ Appends the `value` to the entry specified by `key`. 
""" + """Appends the `value` to the entry specified by `key`.""" self[key] = np.concatenate([self[key], value]) def extend(self, other): - """ Appends the elements of another :class:`PerArrayDict`. + """Appends the elements of another :class:`PerArrayDict`. That is, for each entry in this dictionary, we append the elements coming from the other dictionary at the corresponding entry. @@ -137,11 +139,12 @@ def extend(self, other): ----- The keys in both dictionaries must be the same. """ - if (len(self) > 0 and len(other) > 0 and - sorted(self.keys()) != sorted(other.keys())): - msg = ("Entry mismatched between the two PerArrayDict objects. " - f"This PerArrayDict contains '{sorted(self.keys())}' " - f"whereas the other contains '{sorted(other.keys())}'.") + if len(self) > 0 and len(other) > 0 and sorted(self.keys()) != sorted(other.keys()): + msg = ( + 'Entry mismatched between the two PerArrayDict objects. ' + f"This PerArrayDict contains '{sorted(self.keys())}' " + f"whereas the other contains '{sorted(other.keys())}'." + ) raise ValueError(msg) self.n_rows += other.n_rows @@ -153,7 +156,7 @@ def extend(self, other): class PerArraySequenceDict(PerArrayDict): - """ Dictionary for which key access can do slicing on the values. + """Dictionary for which key access can do slicing on the values. This container behaves like a standard dictionary but extends key access to allow keys for key access to be indices slicing into the contained ndarray @@ -163,29 +166,31 @@ class PerArraySequenceDict(PerArrayDict): sequences matches the number of elements given at the instantiation of the instance. """ + def __setitem__(self, key, value): value = ArraySequence(value) # We make sure there is the right amount of data. if 0 < self.n_rows != value.total_nb_rows: - msg = f"The number of values ({value.total_nb_rows}) should match ({self.n_rows})." + msg = f'The number of values ({value.total_nb_rows}) should match ({self.n_rows}).' raise ValueError(msg) self.store[key] = value def _extend_entry(self, key, value): - """ Appends the `value` to the entry specified by `key`. """ + """Appends the `value` to the entry specified by `key`.""" self[key].extend(value) class LazyDict(MutableMapping): - """ Dictionary of generator functions. + """Dictionary of generator functions. This container behaves like a dictionary but it makes sure its elements are callable objects that it assumes are generator functions yielding values. When getting the element associated with a given key, the element (i.e. a generator function) is first called before being returned. """ + def __init__(self, *args, **kwargs): self.store = dict() # Use the 'update' method to set the keys. @@ -204,9 +209,11 @@ def __getitem__(self, key): def __setitem__(self, key, value): if not callable(value): - msg = ("Values in a `LazyDict` must be generator functions." - " These are functions which, when called, return an" - " instantiated generator.") + msg = ( + 'Values in a `LazyDict` must be generator functions.' + ' These are functions which, when called, return an' + ' instantiated generator.' + ) raise TypeError(msg) self.store[key] = value @@ -221,7 +228,7 @@ def __len__(self): class TractogramItem: - """ Class containing information about one streamline. + """Class containing information about one streamline. :class:`TractogramItem` objects have three public attributes: `streamline`, `data_for_streamline`, and `data_for_points`. 
@@ -241,6 +248,7 @@ class TractogramItem: (Nt, Mk), where ``Nt`` is the number of points of this streamline and ``Mk`` is the dimension of the data associated with key ``k``. """ + def __init__(self, streamline, data_for_streamline, data_for_points): self.streamline = np.asarray(streamline) self.data_for_streamline = data_for_streamline @@ -254,7 +262,7 @@ def __len__(self): class Tractogram: - """ Container for streamlines and their data information. + """Container for streamlines and their data information. Streamlines of a tractogram can be in any coordinate system of your choice as long as you provide the correct `affine_to_rasmm` matrix, at @@ -292,10 +300,10 @@ class Tractogram: .. [#] http://nipy.org/nibabel/coordinate_systems.html#naming-reference-spaces .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ - def __init__(self, streamlines=None, - data_per_streamline=None, - data_per_point=None, - affine_to_rasmm=None): + + def __init__( + self, streamlines=None, data_per_streamline=None, data_per_point=None, affine_to_rasmm=None + ): """ Parameters ---------- @@ -341,7 +349,8 @@ def data_per_streamline(self): @data_per_streamline.setter def data_per_streamline(self, value): self._data_per_streamline = PerArrayDict( - len(self.streamlines), {} if value is None else value) + len(self.streamlines), {} if value is None else value + ) @property def data_per_point(self): @@ -350,11 +359,12 @@ def data_per_point(self): @data_per_point.setter def data_per_point(self, value): self._data_per_point = PerArraySequenceDict( - self.streamlines.total_nb_rows, {} if value is None else value) + self.streamlines.total_nb_rows, {} if value is None else value + ) @property def affine_to_rasmm(self): - """ Affine bringing streamlines in this tractogram to RAS+mm. """ + """Affine bringing streamlines in this tractogram to RAS+mm.""" return copy.deepcopy(self._affine_to_rasmm) @affine_to_rasmm.setter @@ -362,8 +372,10 @@ def affine_to_rasmm(self, value): if value is not None: value = np.array(value) if value.shape != (4, 4): - msg = ("Affine matrix has a shape of (4, 4) but a ndarray with " - f"shape {value.shape} was provided instead.") + msg = ( + 'Affine matrix has a shape of (4, 4) but a ndarray with ' + f'shape {value.shape} was provided instead.' + ) raise ValueError(msg) self._affine_to_rasmm = value @@ -386,18 +398,19 @@ def __getitem__(self, idx): if isinstance(idx, (numbers.Integral, np.integer)): return TractogramItem(pts, data_per_streamline, data_per_point) - return Tractogram(pts, data_per_streamline, data_per_point, - affine_to_rasmm=self.affine_to_rasmm) + return Tractogram( + pts, data_per_streamline, data_per_point, affine_to_rasmm=self.affine_to_rasmm + ) def __len__(self): return len(self.streamlines) def copy(self): - """ Returns a copy of this :class:`Tractogram` object. """ + """Returns a copy of this :class:`Tractogram` object.""" return copy.deepcopy(self) def apply_affine(self, affine, lazy=False): - """ Applies an affine transformation on the points of each streamline. + """Applies an affine transformation on the points of each streamline. If `lazy` is not specified, this is performed *in-place*. @@ -438,13 +451,12 @@ def apply_affine(self, affine, lazy=False): if self.affine_to_rasmm is not None: # Update the affine that brings back the streamlines to RASmm. 
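# Editor's note (sketch, not part of the patch): Tractogram.apply_affine,
# as reformatted above, mutates the points in-place and folds the *inverse*
# transform into affine_to_rasmm, so to_world() still lands in RAS+mm.
import numpy as np
from nibabel.streamlines import Tractogram

t = Tractogram([np.array([[0.0, 0, 0], [1, 1, 1]])], affine_to_rasmm=np.eye(4))
scale = np.diag([2.0, 2.0, 2.0, 1.0])
t.apply_affine(scale)
assert np.allclose(t.streamlines[0], [[0, 0, 0], [2, 2, 2]])
assert np.allclose(t.affine_to_rasmm, np.linalg.inv(scale))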
- self.affine_to_rasmm = np.dot(self.affine_to_rasmm, - np.linalg.inv(affine)) + self.affine_to_rasmm = np.dot(self.affine_to_rasmm, np.linalg.inv(affine)) return self def to_world(self, lazy=False): - """ Brings the streamlines to world space (i.e. RAS+ and mm). + """Brings the streamlines to world space (i.e. RAS+ and mm). If `lazy` is not specified, this is performed *in-place*. @@ -464,14 +476,16 @@ def to_world(self, lazy=False): :class:`Tractogram` object with updated streamlines. """ if self.affine_to_rasmm is None: - msg = ("Streamlines are in a unknown space. This error can be" - " avoided by setting the 'affine_to_rasmm' property.") + msg = ( + 'Streamlines are in a unknown space. This error can be' + " avoided by setting the 'affine_to_rasmm' property." + ) raise ValueError(msg) return self.apply_affine(self.affine_to_rasmm, lazy=lazy) def extend(self, other): - """ Appends the data of another :class:`Tractogram`. + """Appends the data of another :class:`Tractogram`. Data that will be appended includes the streamlines and the content of both dictionaries `data_per_streamline` and `data_per_point`. @@ -506,7 +520,7 @@ def __add__(self, other): class LazyTractogram(Tractogram): - """ Lazy container for streamlines and their data information. + """Lazy container for streamlines and their data information. This container behaves lazily as it uses generator functions to manage streamlines and their data information. This container is thus memory @@ -557,10 +571,10 @@ class LazyTractogram(Tractogram): .. [#] http://nipy.org/nibabel/coordinate_systems.html#naming-reference-spaces .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ - def __init__(self, streamlines=None, - data_per_streamline=None, - data_per_point=None, - affine_to_rasmm=None): + + def __init__( + self, streamlines=None, data_per_streamline=None, data_per_point=None, affine_to_rasmm=None + ): """ Parameters ---------- @@ -589,17 +603,16 @@ def __init__(self, streamlines=None, refers to the center of the voxel. By default, the streamlines are in an unknown space, i.e. affine_to_rasmm is None. """ - super(LazyTractogram, self).__init__(streamlines, - data_per_streamline, - data_per_point, - affine_to_rasmm) + super(LazyTractogram, self).__init__( + streamlines, data_per_streamline, data_per_point, affine_to_rasmm + ) self._nb_streamlines = None self._data = None self._affine_to_apply = np.eye(4) @classmethod def from_tractogram(cls, tractogram): - """ Creates a :class:`LazyTractogram` object from a :class:`Tractogram` object. + """Creates a :class:`LazyTractogram` object from a :class:`Tractogram` object. Parameters ---------- @@ -633,7 +646,7 @@ def _gen(key): @classmethod def from_data_func(cls, data_func): - """ Creates an instance from a generator function. + """Creates an instance from a generator function. The generator function must yield :class:`TractogramItem` objects. @@ -650,7 +663,7 @@ def from_data_func(cls, data_func): New lazy tractogram. 
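# Editor's note (sketch, not part of the patch): from_data_func expects a
# *function* returning a generator of TractogramItem objects; the first
# item is peeked at to discover the data_per_streamline/point keys.
import numpy as np
from nibabel.streamlines.tractogram import LazyTractogram, TractogramItem

def items():
    for _ in range(5):
        pts = np.zeros((10, 3), dtype=np.float32)
        yield TractogramItem(pts, data_for_streamline={}, data_for_points={})

lazy = LazyTractogram.from_data_func(items)
assert sum(1 for _ in lazy.streamlines) == 5  # streamed on demand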
""" if not callable(data_func): - raise TypeError("`data_func` must be a generator function.") + raise TypeError('`data_func` must be a generator function.') lazy_tractogram = cls() lazy_tractogram._data = data_func @@ -660,8 +673,7 @@ def from_data_func(cls, data_func): # Set data_per_streamline using data_func def _gen(key): - return lambda: (t.data_for_streamline[key] - for t in data_func()) + return lambda: (t.data_for_streamline[key] for t in data_func()) data_per_streamline_keys = first_item.data_for_streamline.keys() for k in data_per_streamline_keys: @@ -690,6 +702,7 @@ def streamlines(self): # Check if we need to apply an affine. if not np.allclose(self._affine_to_apply, np.eye(4)): + def _apply_affine(): for s in streamlines_gen: yield apply_affine(self._affine_to_apply, s) @@ -700,9 +713,11 @@ def _apply_affine(): def _set_streamlines(self, value): if value is not None and not callable(value): - msg = ("`streamlines` must be a generator function. That is a" - " function which, when called, returns an instantiated" - " generator.") + msg = ( + '`streamlines` must be a generator function. That is a' + ' function which, when called, returns an instantiated' + ' generator.' + ) raise TypeError(msg) self._streamlines = value @@ -768,28 +783,33 @@ def __iter__(self): def __len__(self): # Check if we know how many streamlines there are. if self._nb_streamlines is None: - warn("Number of streamlines will be determined manually by looping" - " through the streamlines. If you know the actual number of" - " streamlines, you might want to set it beforehand via" - " `self.header.nb_streamlines`.", Warning) + warn( + 'Number of streamlines will be determined manually by looping' + ' through the streamlines. If you know the actual number of' + ' streamlines, you might want to set it beforehand via' + ' `self.header.nb_streamlines`.', + Warning, + ) # Count the number of streamlines. self._nb_streamlines = sum(1 for _ in self.streamlines) return self._nb_streamlines def copy(self): - """ Returns a copy of this :class:`LazyTractogram` object. """ - tractogram = LazyTractogram(self._streamlines, - self._data_per_streamline, - self._data_per_point, - self.affine_to_rasmm) + """Returns a copy of this :class:`LazyTractogram` object.""" + tractogram = LazyTractogram( + self._streamlines, + self._data_per_streamline, + self._data_per_point, + self.affine_to_rasmm, + ) tractogram._nb_streamlines = self._nb_streamlines tractogram._data = self._data tractogram._affine_to_apply = self._affine_to_apply.copy() return tractogram def apply_affine(self, affine, lazy=True): - """ Applies an affine transformation to the streamlines. + """Applies an affine transformation to the streamlines. The transformation given by the `affine` matrix is applied after any other pending transformations to the streamline points. @@ -809,7 +829,7 @@ def apply_affine(self, affine, lazy=True): transformation to be applied on the streamlines. """ if not lazy: - msg = "LazyTractogram only supports lazy transformations." + msg = 'LazyTractogram only supports lazy transformations.' raise ValueError(msg) tractogram = self.copy() # New instance. @@ -819,12 +839,11 @@ def apply_affine(self, affine, lazy=True): if tractogram.affine_to_rasmm is not None: # Update the affine that brings back the streamlines to RASmm. 
- tractogram.affine_to_rasmm = np.dot(self.affine_to_rasmm, - np.linalg.inv(affine)) + tractogram.affine_to_rasmm = np.dot(self.affine_to_rasmm, np.linalg.inv(affine)) return tractogram def to_world(self, lazy=True): - """ Brings the streamlines to world space (i.e. RAS+ and mm). + """Brings the streamlines to world space (i.e. RAS+ and mm). The transformation is applied after any other pending transformations to the streamline points. @@ -842,8 +861,10 @@ def to_world(self, lazy=True): transformation to be applied on the streamlines. """ if self.affine_to_rasmm is None: - msg = ("Streamlines are in a unknown space. This error can be" - " avoided by setting the 'affine_to_rasmm' property.") + msg = ( + 'Streamlines are in a unknown space. This error can be' + " avoided by setting the 'affine_to_rasmm' property." + ) raise ValueError(msg) return self.apply_affine(self.affine_to_rasmm, lazy=lazy) diff --git a/nibabel/streamlines/tractogram_file.py b/nibabel/streamlines/tractogram_file.py index f8184c8ba9..321ea3d2ad 100644 --- a/nibabel/streamlines/tractogram_file.py +++ b/nibabel/streamlines/tractogram_file.py @@ -1,4 +1,4 @@ -""" Define abstract interface for Tractogram file classes +"""Define abstract interface for Tractogram file classes """ from abc import ABC, abstractmethod @@ -6,23 +6,23 @@ class ExtensionWarning(Warning): - """ Base class for warnings about tractogram file extension. """ + """Base class for warnings about tractogram file extension.""" class HeaderWarning(Warning): - """ Base class for warnings about tractogram file header. """ + """Base class for warnings about tractogram file header.""" class DataWarning(Warning): - """ Base class for warnings about tractogram file data. """ + """Base class for warnings about tractogram file data.""" class HeaderError(Exception): - """ Raised when a tractogram file header contains invalid information. """ + """Raised when a tractogram file header contains invalid information.""" class DataError(Exception): - """ Raised when data is missing or inconsistent in a tractogram file. """ + """Raised when data is missing or inconsistent in a tractogram file.""" class abstractclassmethod(classmethod): @@ -34,7 +34,7 @@ def __init__(self, callable): class TractogramFile(ABC): - """ Convenience class to encapsulate tractogram file format. """ + """Convenience class to encapsulate tractogram file format.""" def __init__(self, tractogram, header=None): self._tractogram = tractogram @@ -54,12 +54,12 @@ def header(self): @property def affine(self): - """ voxmm -> rasmm affine. """ + """voxmm -> rasmm affine.""" return self.header.get(Field.VOXEL_TO_RASMM) @abstractclassmethod def is_correct_format(cls, fileobj): - """ Checks if the file has the right streamlines file format. + """Checks if the file has the right streamlines file format. Parameters ---------- @@ -78,12 +78,12 @@ def is_correct_format(cls, fileobj): @classmethod def create_empty_header(cls): - """ Returns an empty header for this streamlines file format. """ + """Returns an empty header for this streamlines file format.""" return {} @abstractclassmethod def load(cls, fileobj, lazy_load=True): - """ Loads streamlines from a filename or file-like object. + """Loads streamlines from a filename or file-like object. Parameters ---------- @@ -105,7 +105,7 @@ def load(cls, fileobj, lazy_load=True): @abstractmethod def save(self, fileobj): - """ Saves streamlines to a filename or file-like object. + """Saves streamlines to a filename or file-like object. 
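# Editor's note (sketch, not part of the patch): the minimal surface a new
# format would implement under the TractogramFile ABC above. The ZzzFile
# name is made up for illustration.
from nibabel.streamlines.tractogram_file import TractogramFile

class ZzzFile(TractogramFile):
    @classmethod
    def is_correct_format(cls, fileobj):
        return False  # a real format would peek at its magic bytes here

    @classmethod
    def load(cls, fileobj, lazy_load=True):
        raise NotImplementedError

    def save(self, fileobj):
        raise NotImplementedError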
Parameters ---------- diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 6b45aae122..eb382af4d0 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -1,4 +1,3 @@ - # Definition of trackvis header structure: # http://www.trackvis.org/docs/?subsect=fileformat @@ -13,8 +12,8 @@ import nibabel as nib from nibabel.openers import Opener -from nibabel.volumeutils import (native_code, swapped_code, endian_codes) -from nibabel.orientations import (aff2axcodes, axcodes2ornt) +from nibabel.volumeutils import native_code, swapped_code, endian_codes +from nibabel.orientations import aff2axcodes, axcodes2ornt from .array_sequence import create_arraysequences_from_generator from .tractogram_file import TractogramFile @@ -32,38 +31,38 @@ # coordinates (axes L->R, P->A, I->S). If (0 based) value [3, 3] from # this matrix is 0, this means the matrix is not recorded. # See http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -header_2_dtd = [(Field.MAGIC_NUMBER, 'S6'), - (Field.DIMENSIONS, 'h', 3), - (Field.VOXEL_SIZES, 'f4', 3), - (Field.ORIGIN, 'f4', 3), - (Field.NB_SCALARS_PER_POINT, 'h'), - ('scalar_name', 'S20', MAX_NB_NAMED_SCALARS_PER_POINT), - (Field.NB_PROPERTIES_PER_STREAMLINE, 'h'), - ('property_name', 'S20', - MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE), - (Field.VOXEL_TO_RASMM, 'f4', (4, 4)), # New in version 2. - ('reserved', 'S444'), - (Field.VOXEL_ORDER, 'S4'), - ('pad2', 'S4'), - ('image_orientation_patient', 'f4', 6), - ('pad1', 'S2'), - ('invert_x', 'S1'), - ('invert_y', 'S1'), - ('invert_z', 'S1'), - ('swap_xy', 'S1'), - ('swap_yz', 'S1'), - ('swap_zx', 'S1'), - (Field.NB_STREAMLINES, 'i4'), - ('version', 'i4'), - ('hdr_size', 'i4'), - ] +header_2_dtd = [ + (Field.MAGIC_NUMBER, 'S6'), + (Field.DIMENSIONS, 'h', 3), + (Field.VOXEL_SIZES, 'f4', 3), + (Field.ORIGIN, 'f4', 3), + (Field.NB_SCALARS_PER_POINT, 'h'), + ('scalar_name', 'S20', MAX_NB_NAMED_SCALARS_PER_POINT), + (Field.NB_PROPERTIES_PER_STREAMLINE, 'h'), + ('property_name', 'S20', MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE), + (Field.VOXEL_TO_RASMM, 'f4', (4, 4)), # New in version 2. + ('reserved', 'S444'), + (Field.VOXEL_ORDER, 'S4'), + ('pad2', 'S4'), + ('image_orientation_patient', 'f4', 6), + ('pad1', 'S2'), + ('invert_x', 'S1'), + ('invert_y', 'S1'), + ('invert_z', 'S1'), + ('swap_xy', 'S1'), + ('swap_yz', 'S1'), + ('swap_zx', 'S1'), + (Field.NB_STREAMLINES, 'i4'), + ('version', 'i4'), + ('hdr_size', 'i4'), +] # Full header numpy dtypes header_2_dtype = np.dtype(header_2_dtd) def get_affine_trackvis_to_rasmm(header): - """ Get affine mapping trackvis voxelmm space to RAS+ mm space + """Get affine mapping trackvis voxelmm space to RAS+ mm space The streamlines in a trackvis file are in 'voxelmm' space, where the coordinates refer to the corner of the voxel. @@ -106,7 +105,7 @@ def get_affine_trackvis_to_rasmm(header): # Input header can be dict or structured array if hasattr(vox_order, 'item'): # structured array vox_order = header[Field.VOXEL_ORDER].item() - affine_ornt = "".join(aff2axcodes(header[Field.VOXEL_TO_RASMM])) + affine_ornt = ''.join(aff2axcodes(header[Field.VOXEL_TO_RASMM])) header_ornt = axcodes2ornt(vox_order.decode('latin1').upper()) affine_ornt = axcodes2ornt(affine_ornt) ornt = nib.orientations.ornt_transform(header_ornt, affine_ornt) @@ -125,7 +124,7 @@ def get_affine_rasmm_to_trackvis(header): def encode_value_in_name(value, name, max_name_len=20): - """ Return `name` as fixed-length string, appending `value` as string. 
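# Editor's note (worked example, not part of the patch): for 2 mm isotropic
# voxels, RAS voxel order, and a matching vox_to_ras, the voxelmm -> rasmm
# affine divides by the voxel size, shifts by half a voxel (corner-of-voxel
# to center-of-voxel convention), then applies vox_to_ras: net -1 mm shift.
import numpy as np
from nibabel.streamlines.header import Field
from nibabel.streamlines.trk import get_affine_trackvis_to_rasmm

hdr = {
    Field.VOXEL_SIZES: np.array([2.0, 2.0, 2.0]),
    Field.VOXEL_ORDER: b'RAS',
    Field.VOXEL_TO_RASMM: np.diag([2.0, 2.0, 2.0, 1.0]),
    Field.DIMENSIONS: np.array([10, 10, 10]),
}
aff = get_affine_trackvis_to_rasmm(hdr)
assert np.allclose(aff, [[1, 0, 0, -1], [0, 1, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]])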
+ """Return `name` as fixed-length string, appending `value` as string. Form output from `name` if `value <= 1` else `name` + ``\x00`` + str(value). @@ -157,16 +156,18 @@ def encode_value_in_name(value, name, max_name_len=20): raise ValueError(msg) encoded_name = name if value <= 1 else name + '\x00' + str(value) if len(encoded_name) > max_name_len: - msg = (f"Data information named '{name}' is too long (need to be less" - f" than {max_name_len - (len(str(value)) + 1)} characters " - "when storing more than one value for a given data information.") + msg = ( + f"Data information named '{name}' is too long (need to be less" + f' than {max_name_len - (len(str(value)) + 1)} characters ' + 'when storing more than one value for a given data information.' + ) raise ValueError(msg) # Fill to the end with zeros return encoded_name.ljust(max_name_len, '\x00').encode('latin1') def decode_value_from_name(encoded_name): - """ Decodes a value that has been encoded in the last bytes of a string. + """Decodes a value that has been encoded in the last bytes of a string. Check :func:`encode_value_in_name` to see how the value has been encoded. @@ -194,15 +195,17 @@ def decode_value_from_name(encoded_name): value = int(splits[1]) # Decode value. elif len(splits) > 2: # The remaining bytes are not \x00, raising. - msg = (f"Wrong scalar_name or property_name: '{encoded_name}'. " - "Unused characters should be \\x00.") + msg = ( + f"Wrong scalar_name or property_name: '{encoded_name}'. " + 'Unused characters should be \\x00.' + ) raise HeaderError(msg) return name, value class TrkFile(TractogramFile): - """ Convenience class to encapsulate TRK file format. + """Convenience class to encapsulate TRK file format. Notes ----- @@ -216,7 +219,7 @@ class TrkFile(TractogramFile): """ # Constants - MAGIC_NUMBER = b"TRACK" + MAGIC_NUMBER = b'TRACK' HEADER_SIZE = 1000 SUPPORTS_DATA_PER_POINT = True SUPPORTS_DATA_PER_STREAMLINE = True @@ -241,7 +244,7 @@ def __init__(self, tractogram, header=None): @classmethod def is_correct_format(cls, fileobj): - """ Check if the file is in TRK format. + """Check if the file is in TRK format. 
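# Editor's note (worked example, not part of the patch): a 'colors' scalar
# holding 3 values per point is encoded as name + NUL + count, zero-padded
# to the fixed 20-byte TRK name field, and decodes back to ('colors', 3).
from nibabel.streamlines.trk import encode_value_in_name, decode_value_from_name

raw = encode_value_in_name(3, 'colors')
assert raw == b'colors\x003' + b'\x00' * 12  # 20 bytes in total
assert decode_value_from_name(raw) == ('colors', 3)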
Parameters ---------- @@ -265,8 +268,7 @@ def is_correct_format(cls, fileobj): @classmethod def _default_structarr(cls, endianness=None): - """ Return an empty compliant TRK header as numpy structured array - """ + """Return an empty compliant TRK header as numpy structured array""" dt = header_2_dtype if endianness is not None: endianness = endian_codes[endianness] @@ -275,10 +277,10 @@ def _default_structarr(cls, endianness=None): # Default values st_arr[Field.MAGIC_NUMBER] = cls.MAGIC_NUMBER - st_arr[Field.VOXEL_SIZES] = np.array((1, 1, 1), dtype="f4") - st_arr[Field.DIMENSIONS] = np.array((1, 1, 1), dtype="h") - st_arr[Field.VOXEL_TO_RASMM] = np.eye(4, dtype="f4") - st_arr[Field.VOXEL_ORDER] = b"RAS" + st_arr[Field.VOXEL_SIZES] = np.array((1, 1, 1), dtype='f4') + st_arr[Field.DIMENSIONS] = np.array((1, 1, 1), dtype='h') + st_arr[Field.VOXEL_TO_RASMM] = np.eye(4, dtype='f4') + st_arr[Field.VOXEL_ORDER] = b'RAS' st_arr['version'] = 2 st_arr['hdr_size'] = cls.HEADER_SIZE @@ -286,14 +288,13 @@ def _default_structarr(cls, endianness=None): @classmethod def create_empty_header(cls, endianness=None): - """ Return an empty compliant TRK header as dict - """ + """Return an empty compliant TRK header as dict""" st_arr = cls._default_structarr(endianness) return dict(zip(st_arr.dtype.names, st_arr.tolist())) @classmethod def load(cls, fileobj, lazy_load=False): - """ Loads streamlines from a filename or file-like object. + """Loads streamlines from a filename or file-like object. Parameters ---------- @@ -357,15 +358,14 @@ def load(cls, fileobj, lazy_load=False): data_per_streamline_slice['properties'] = slice_obj if lazy_load: + def _read(): for pts, scals, props in cls._read(fileobj, hdr): items = data_per_point_slice.items() data_for_points = dict((k, scals[:, v]) for k, v in items) items = data_per_streamline_slice.items() data_for_streamline = dict((k, props[v]) for k, v in items) - yield TractogramItem(pts, - data_for_streamline, - data_for_points) + yield TractogramItem(pts, data_for_streamline, data_for_points) tractogram = LazyTractogram.from_data_func(_read) @@ -381,12 +381,11 @@ def _read(): # Buffer size is in mega bytes. mbytes = size // (1024 * 1024) sizes = [mbytes, 4, 4] - if hdr["nb_scalars_per_point"] > 0: + if hdr['nb_scalars_per_point'] > 0: sizes = [mbytes // 2, mbytes // 2, 4] trk_reader = cls._read(fileobj, hdr) - arr_seqs = create_arraysequences_from_generator(trk_reader, n=3, - buffer_sizes=sizes) + arr_seqs = create_arraysequences_from_generator(trk_reader, n=3, buffer_sizes=sizes) streamlines, scalars, properties = arr_seqs properties = np.asarray(properties) # Actually a 2d array. tractogram = Tractogram(streamlines) @@ -403,7 +402,7 @@ def _read(): return cls(tractogram, header=hdr) def save(self, fileobj): - """ Save tractogram to a filename or file-like object using TRK format. + """Save tractogram to a filename or file-like object using TRK format. Parameters ---------- @@ -422,8 +421,8 @@ def save(self, fileobj): # By default, the voxel order is LPS. # http://trackvis.org/blog/forum/diffusion-toolkit-usage/interpretation-of-track-point-coordinates - if header[Field.VOXEL_ORDER] == b"": - header[Field.VOXEL_ORDER] = b"LPS" + if header[Field.VOXEL_ORDER] == b'': + header[Field.VOXEL_ORDER] = b'LPS' # Keep counts for correcting incoherent fields or warn. 
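# Editor's note (sketch, not part of the patch): exercising the save() path
# above end to end, writing a one-streamline tractogram to an in-memory
# file and reading it back, as the tests earlier in this patch also do.
from io import BytesIO
import numpy as np
from nibabel.streamlines import Tractogram
from nibabel.streamlines.trk import TrkFile

t = Tractogram([np.array([[0.0, 0, 0], [1, 1, 1]], dtype=np.float32)],
               affine_to_rasmm=np.eye(4))
f = BytesIO()
TrkFile(t).save(f)
f.seek(0)
loaded = TrkFile.load(f, lazy_load=False)
assert len(loaded.tractogram) == 1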
nb_streamlines = 0 @@ -431,15 +430,15 @@ def save(self, fileobj): nb_scalars = 0 nb_properties = 0 - with Opener(fileobj, mode="wb") as f: + with Opener(fileobj, mode='wb') as f: # Keep track of the beginning of the header. beginning = f.tell() # Write temporary header that we will update at the end f.write(header.tobytes()) - i4_dtype = np.dtype(" MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE: - msg = (f"Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} named " - "data_per_streamline (also known as 'properties' in the " - "TRK format).") + msg = ( + f'Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} named ' + "data_per_streamline (also known as 'properties' in the " + 'TRK format).' + ) raise ValueError(msg) data_for_streamline_keys = sorted(data_for_streamline.keys()) - property_name = np.zeros(MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE, - dtype='S20') + property_name = np.zeros(MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE, dtype='S20') for i, name in enumerate(data_for_streamline_keys): # Append number of values as ascii to zero-terminated name # to encode number of values into trackvis name. @@ -489,9 +489,11 @@ def save(self, fileobj): # Update field 'scalar_name' using 'tractogram.data_per_point'. data_for_points = first_item.data_for_points if len(data_for_points) > MAX_NB_NAMED_SCALARS_PER_POINT: - msg = (f"Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} " - "named data_per_point (also known as 'scalars' in " - "the TRK format).") + msg = ( + f'Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} ' + "named data_per_point (also known as 'scalars' in " + 'the TRK format).' + ) raise ValueError(msg) data_for_points_keys = sorted(data_for_points.keys()) @@ -504,23 +506,27 @@ def save(self, fileobj): header['scalar_name'][:] = scalar_name for t in tractogram: - if any((len(d) != len(t.streamline) - for d in t.data_for_points.values())): - raise DataError("Missing scalars for some points!") + if any((len(d) != len(t.streamline) for d in t.data_for_points.values())): + raise DataError('Missing scalars for some points!') points = np.asarray(t.streamline) - scalars = [np.asarray(t.data_for_points[k]) - for k in data_for_points_keys] - scalars = np.concatenate([np.ndarray((len(points), 0),) - ] + scalars, axis=1) - properties = [np.asarray(t.data_for_streamline[k]) - for k in data_for_streamline_keys] - properties = np.concatenate( - [np.array([])] + properties).astype(f4_dtype) + scalars = [np.asarray(t.data_for_points[k]) for k in data_for_points_keys] + scalars = np.concatenate( + [ + np.ndarray( + (len(points), 0), + ) + ] + + scalars, + axis=1, + ) + properties = [ + np.asarray(t.data_for_streamline[k]) for k in data_for_streamline_keys + ] + properties = np.concatenate([np.array([])] + properties).astype(f4_dtype) data = struct.pack(i4_dtype.str[:-1], len(points)) - pts_scalars = np.concatenate( - [points, scalars], axis=1).astype(f4_dtype) + pts_scalars = np.concatenate([points, scalars], axis=1).astype(f4_dtype) data += pts_scalars.tobytes() data += properties.tobytes() f.write(data) @@ -536,12 +542,11 @@ def save(self, fileobj): # Check for errors if nb_scalars_per_point != int(nb_scalars_per_point): - msg = "Nb. of scalars differs from one point to another!" + msg = 'Nb. of scalars differs from one point to another!' raise DataError(msg) if nb_properties_per_streamline != int(nb_properties_per_streamline): - msg = ("Nb. of properties differs from one streamline to" - " another!") + msg = 'Nb. of properties differs from one streamline to' ' another!' 
raise DataError(msg) header[Field.NB_STREAMLINES] = nb_streamlines @@ -554,7 +559,7 @@ def save(self, fileobj): @staticmethod def _read_header(fileobj): - """ Reads a TRK header from a file. + """Reads a TRK header from a file. Parameters ---------- @@ -586,21 +591,26 @@ def _read_header(fileobj): # Swap byte order header_rec = header_rec.newbyteorder() if header_rec['hdr_size'] != TrkFile.HEADER_SIZE: - msg = (f"Invalid hdr_size: {header_rec['hdr_size']} " - f"instead of {TrkFile.HEADER_SIZE}") + msg = ( + f"Invalid hdr_size: {header_rec['hdr_size']} " + f'instead of {TrkFile.HEADER_SIZE}' + ) raise HeaderError(msg) if header_rec['version'] == 1: # There is no 4x4 matrix for voxel to RAS transformation. header_rec[Field.VOXEL_TO_RASMM] = np.zeros((4, 4)) elif header_rec['version'] == 3: - warnings.warn('Parsing a TRK v3 file as v2. Some features may not ' - 'be handled correctly.', HeaderWarning) + warnings.warn( + 'Parsing a TRK v3 file as v2. Some features may not ' 'be handled correctly.', + HeaderWarning, + ) elif header_rec['version'] in (2, 3): pass # Nothing more to do. else: - raise HeaderError('NiBabel only supports versions 1 and 2 of ' - 'the Trackvis file format') + raise HeaderError( + 'NiBabel only supports versions 1 and 2 of ' 'the Trackvis file format' + ) # Convert the first record of `header_rec` into a dictionary header = dict(zip(header_rec.dtype.names, header_rec[0])) @@ -609,26 +619,35 @@ def _read_header(fileobj): # If vox_to_ras[3][3] is 0, it means the matrix is not recorded. if header[Field.VOXEL_TO_RASMM][3][3] == 0: header[Field.VOXEL_TO_RASMM] = np.eye(4, dtype=np.float32) - warnings.warn(("Field 'vox_to_ras' in the TRK's header was" - " not recorded. Will continue assuming it's" - " the identity."), HeaderWarning) + warnings.warn( + ( + "Field 'vox_to_ras' in the TRK's header was" + " not recorded. Will continue assuming it's" + ' the identity.' + ), + HeaderWarning, + ) # Check that the 'vox_to_ras' affine is valid, i.e. should be # able to determine the axis directions. axcodes = aff2axcodes(header[Field.VOXEL_TO_RASMM]) if None in axcodes: - msg = ("The 'vox_to_ras' affine is invalid! Could not" - " determine the axis directions from it.\n" - f"{header[Field.VOXEL_TO_RASMM]}") + msg = ( + "The 'vox_to_ras' affine is invalid! Could not" + ' determine the axis directions from it.\n' + f'{header[Field.VOXEL_TO_RASMM]}' + ) raise HeaderError(msg) # By default, the voxel order is LPS. # http://trackvis.org/blog/forum/diffusion-toolkit-usage/interpretation-of-track-point-coordinates - if header[Field.VOXEL_ORDER] == b"": - msg = ("Voxel order is not specified, will assume 'LPS' since" - " it is Trackvis software's default.") + if header[Field.VOXEL_ORDER] == b'': + msg = ( + "Voxel order is not specified, will assume 'LPS' since" + " it is Trackvis software's default." + ) warnings.warn(msg, HeaderWarning) - header[Field.VOXEL_ORDER] = b"LPS" + header[Field.VOXEL_ORDER] = b'LPS' # Keep the file position where the data begin. 
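# Editor's note (sketch, not part of the patch): how _read_header above
# detects byte order. hdr_size must read as 1000; when it does not, the
# same bytes are reinterpreted with the opposite endianness.
import numpy as np
from nibabel.streamlines.trk import TrkFile, header_2_dtype

rec = np.zeros(1, dtype=header_2_dtype)
rec['hdr_size'] = TrkFile.HEADER_SIZE
foreign = rec.view(header_2_dtype.newbyteorder())  # same bytes, other order
assert foreign['hdr_size'][0] != TrkFile.HEADER_SIZE  # would trigger a swap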
header['_offset_data'] = f.tell() @@ -641,7 +660,7 @@ def _read_header(fileobj): @staticmethod def _read(fileobj, header): - """ Return generator that reads TRK data from `fileobj` given `header` + """Return generator that reads TRK data from `fileobj` given `header` Parameters ---------- @@ -663,20 +682,19 @@ def _read(fileobj, header): * scalars: ndarray of shape (n_pts, nb_scalars_per_point) * properties: ndarray of shape (nb_properties_per_point,) """ - i4_dtype = np.dtype(header[Field.ENDIANNESS] + "i4") - f4_dtype = np.dtype(header[Field.ENDIANNESS] + "f4") + i4_dtype = np.dtype(header[Field.ENDIANNESS] + 'i4') + f4_dtype = np.dtype(header[Field.ENDIANNESS] + 'f4') with Opener(fileobj) as f: start_position = f.tell() - nb_pts_and_scalars = int(3 + - header[Field.NB_SCALARS_PER_POINT]) + nb_pts_and_scalars = int(3 + header[Field.NB_SCALARS_PER_POINT]) pts_and_scalars_size = int(nb_pts_and_scalars * f4_dtype.itemsize) nb_properties = header[Field.NB_PROPERTIES_PER_STREAMLINE] properties_size = int(nb_properties * f4_dtype.itemsize) # Set the file position at the beginning of the data. - f.seek(header["_offset_data"], os.SEEK_SET) + f.seek(header['_offset_data'], os.SEEK_SET) # If 'count' field is 0, i.e. not provided, we have to loop # until the EOF. @@ -700,16 +718,16 @@ def _read(fileobj, header): points_and_scalars = np.ndarray( shape=(nb_pts, nb_pts_and_scalars), dtype=f4_dtype, - buffer=f.read(nb_pts * pts_and_scalars_size)) + buffer=f.read(nb_pts * pts_and_scalars_size), + ) points = points_and_scalars[:, :3] scalars = points_and_scalars[:, 3:] # Read properties properties = np.ndarray( - shape=(nb_properties,), - dtype=f4_dtype, - buffer=f.read(properties_size)) + shape=(nb_properties,), dtype=f4_dtype, buffer=f.read(properties_size) + ) yield points, scalars, properties count += 1 @@ -721,7 +739,7 @@ def _read(fileobj, header): f.seek(start_position, os.SEEK_CUR) def __str__(self): - """ Gets a formatted string of the header of a TRK file. + """Gets a formatted string of the header of a TRK file. Returns ------- @@ -736,19 +754,14 @@ def __str__(self): vars[attr] = vars[hdr_field] nb_scalars = self.header[Field.NB_SCALARS_PER_POINT] - scalar_names = [asstr(s) - for s in vars['scalar_name'][:nb_scalars] - if len(s) > 0] + scalar_names = [asstr(s) for s in vars['scalar_name'][:nb_scalars] if len(s) > 0] vars['scalar_names'] = '\n '.join(scalar_names) nb_properties = self.header[Field.NB_PROPERTIES_PER_STREAMLINE] - property_names = [asstr(s) - for s in vars['property_name'][:nb_properties] - if len(s) > 0] - vars['property_names'] = "\n ".join(property_names) + property_names = [asstr(s) for s in vars['property_name'][:nb_properties] if len(s) > 0] + vars['property_names'] = '\n '.join(property_names) # Make all byte strings into strings # Fixes recursion error on Python 3.3 - vars = dict((k, asstr(v) if hasattr(v, 'decode') else v) - for k, v in vars.items()) + vars = dict((k, asstr(v) if hasattr(v, 'decode') else v) for k, v in vars.items()) return """\ MAGIC NUMBER: {MAGIC_NUMBER} v.{version} @@ -771,4 +784,6 @@ def __str__(self): swap_yz: {swap_yz} swap_zx: {swap_zx} n_count: {NB_STREAMLINES} -hdr_size: {hdr_size}""".format(**vars) +hdr_size: {hdr_size}""".format( + **vars + ) diff --git a/nibabel/streamlines/utils.py b/nibabel/streamlines/utils.py index 085179da9e..80764700f2 100644 --- a/nibabel/streamlines/utils.py +++ b/nibabel/streamlines/utils.py @@ -4,7 +4,7 @@ def get_affine_from_reference(ref): - """ Returns the affine defining the reference space. 
+ """Returns the affine defining the reference space. Parameters ---------- @@ -24,7 +24,7 @@ def get_affine_from_reference(ref): if hasattr(ref, 'shape'): if ref.shape != (4, 4): - msg = "`ref` needs to be a numpy array with shape (4, 4)!" + msg = '`ref` needs to be a numpy array with shape (4, 4)!' raise ValueError(msg) return ref @@ -34,7 +34,7 @@ def get_affine_from_reference(ref): def peek_next(iterable): - """ Peek next element of iterable. + """Peek next element of iterable. Parameters ---------- diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 8c9411ec91..44cc82890b 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Utilities for testing """ +"""Utilities for testing""" import re import os @@ -34,7 +34,7 @@ def test_data(subdir=None, fname=None): elif subdir in ('gifti', 'nicom', 'externals'): resource = os.path.join(subdir, 'tests', 'data') else: - raise ValueError(f"Unknown test data directory: {subdir}") + raise ValueError(f'Unknown test data directory: {subdir}') if fname is not None: resource = os.path.join(resource, fname) @@ -47,7 +47,7 @@ def test_data(subdir=None, fname=None): def assert_dt_equal(a, b): - """ Assert two numpy dtype specifiers are equal + """Assert two numpy dtype specifiers are equal Avoids failed comparison between int32 / int64 and intp """ @@ -55,8 +55,7 @@ def assert_dt_equal(a, b): def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): - """ Allclose in integers go all wrong for large integers - """ + """Allclose in integers go all wrong for large integers""" a = np.atleast_1d(a) # 0d arrays cannot be indexed a, b = np.broadcast_arrays(a, b) if match_nans: @@ -78,21 +77,20 @@ def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): def assert_arrays_equal(arrays1, arrays2): - """ Check two iterables yield the same sequence of arrays. """ + """Check two iterables yield the same sequence of arrays.""" for arr1, arr2 in zip_longest(arrays1, arrays2, fillvalue=None): - assert (arr1 is not None and arr2 is not None) + assert arr1 is not None and arr2 is not None assert_array_equal(arr1, arr2) def assert_re_in(regex, c, flags=0): - """Assert that container (list, str, etc) contains entry matching the regex - """ + """Assert that container (list, str, etc) contains entry matching the regex""" if not isinstance(c, (list, tuple)): c = [c] for e in c: if re.match(regex, e, flags=flags): return - raise AssertionError(f"Not a single entry matched {regex!r} in {c!r}") + raise AssertionError(f'Not a single entry matched {regex!r} in {c!r}') def get_fresh_mod(mod_name=__name__): @@ -106,7 +104,7 @@ def get_fresh_mod(mod_name=__name__): class clear_and_catch_warnings(warnings.catch_warnings): - """ Context manager that resets warning registry for catching warnings + """Context manager that resets warning registry for catching warnings Warnings can be slippery, because, whenever a warning is triggered, Python adds a ``__warningregistry__`` member to the *calling* module. This makes @@ -146,6 +144,7 @@ class clear_and_catch_warnings(warnings.catch_warnings): ... warnings.simplefilter('always') ... 
# do something that raises a warning in np.core.fromnumeric """ + class_modules = () def __init__(self, record=True, modules=()): @@ -171,7 +170,7 @@ def __exit__(self, *exc_info): class error_warnings(clear_and_catch_warnings): - """ Context manager to check for warnings as errors. Usually used with + """Context manager to check for warnings as errors. Usually used with ``assert_raises`` in the with block Examples @@ -183,6 +182,7 @@ class error_warnings(clear_and_catch_warnings): ... print('I consider myself warned') I consider myself warned """ + filter = 'error' def __enter__(self): @@ -192,8 +192,8 @@ def __enter__(self): class suppress_warnings(error_warnings): - """ Version of ``catch_warnings`` class that suppresses warnings - """ + """Version of ``catch_warnings`` class that suppresses warnings""" + filter = 'ignore' @@ -202,12 +202,11 @@ class suppress_warnings(error_warnings): def runif_extra_has(test_str): """Decorator checks to see if NIPY_EXTRA_TESTS env var contains test_str""" - return unittest.skipUnless(test_str in EXTRA_SET, f"Skip {test_str} tests.") + return unittest.skipUnless(test_str in EXTRA_SET, f'Skip {test_str} tests.') def assert_arr_dict_equal(dict1, dict2): - """ Assert that two dicts are equal, where dicts contain arrays - """ + """Assert that two dicts are equal, where dicts contain arrays""" assert set(dict1) == set(dict2) for key, value1 in dict1.items(): value2 = dict2[key] @@ -215,19 +214,20 @@ def assert_arr_dict_equal(dict1, dict2): class BaseTestCase(unittest.TestCase): - """ TestCase that does not attempt to run if prefixed with a ``_`` + """TestCase that does not attempt to run if prefixed with a ``_`` This restores the nose-like behavior of skipping so-named test cases in test runners like pytest. """ + def setUp(self): if self.__class__.__name__.startswith('_'): - raise unittest.SkipTest("Base test case - subclass to run") + raise unittest.SkipTest('Base test case - subclass to run') super().setUp() def expires(version): - "Decorator to mark a test as xfail with ExpiredDeprecationError after version" + """Decorator to mark a test as xfail with ExpiredDeprecationError after version""" from packaging.version import Version from nibabel import __version__ as nbver from nibabel.deprecator import ExpiredDeprecationError diff --git a/nibabel/testing/helpers.py b/nibabel/testing/helpers.py index 49112fddfb..35b13049f1 100644 --- a/nibabel/testing/helpers.py +++ b/nibabel/testing/helpers.py @@ -1,17 +1,18 @@ -""" Helper functions for tests +"""Helper functions for tests """ from io import BytesIO import numpy as np from ..optpkg import optional_package + _, have_scipy, _ = optional_package('scipy.io') from numpy.testing import assert_array_equal def bytesio_filemap(klass): - """ Return bytes io filemap for this image class `klass` """ + """Return bytes io filemap for this image class `klass`""" file_map = klass.make_file_map() for name, fileholder in file_map.items(): fileholder.fileobj = BytesIO() @@ -20,8 +21,7 @@ def bytesio_filemap(klass): def bytesio_round_trip(img): - """ Save then load image from bytesio - """ + """Save then load image from bytesio""" klass = img.__class__ bytes_map = bytesio_filemap(klass) img.to_file_map(bytes_map) @@ -29,7 +29,7 @@ def bytesio_round_trip(img): def assert_data_similar(arr, params): - """ Check data is the same if recorded, otherwise check summaries + """Check data is the same if recorded, otherwise check summaries Helper function to test image array data `arr` against record in `params`, where record can be the 
array itself, or summary values from the array. @@ -50,4 +50,5 @@ def assert_data_similar(arr, params): real_arr = np.asarray(arr) assert np.allclose( (real_arr.min(), real_arr.max(), real_arr.mean()), - (summary['min'], summary['max'], summary['mean'])) + (summary['min'], summary['max'], summary['mean']), + ) diff --git a/nibabel/testing/np_features.py b/nibabel/testing/np_features.py index 8919542d1c..c0739a8502 100644 --- a/nibabel/testing/np_features.py +++ b/nibabel/testing/np_features.py @@ -1,11 +1,11 @@ -""" Look for changes in numpy behavior over versions +"""Look for changes in numpy behavior over versions """ import numpy as np def memmap_after_ufunc(): - """ Return True if ufuncs on memmap arrays always return memmap arrays + """Return True if ufuncs on memmap arrays always return memmap arrays This should be True for numpy < 1.12, False otherwise. @@ -20,4 +20,5 @@ def memmap_after_ufunc(): memmap_after_ufunc.result = isinstance(mm_arr + 1, np.memmap) return memmap_after_ufunc.result + memmap_after_ufunc.result = None diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index ffee1f3829..17b36bd6dd 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -1,4 +1,4 @@ -""" Test differences in affines by reslicing +"""Test differences in affines by reslicing Should be run from directory containing .PAR _and_ matching .REC files from Michael's PAR / REC dataset at: @@ -38,13 +38,10 @@ def resample_img2img(img_to, img_from, order=1, out_class=nib.Nifti1Image): raise Exception('Scipy must be installed to run resample_img2img.') from scipy import ndimage as spnd + vox2vox = npl.inv(img_from.affine).dot(img_to.affine) rzs, trans = to_matvec(vox2vox) - data = spnd.affine_transform(img_from.get_fdata(), - rzs, - trans, - img_to.shape, - order=order) + data = spnd.affine_transform(img_from.get_fdata(), rzs, trans, img_to.shape, order=order) return out_class(data, img_to.affine) @@ -56,14 +53,14 @@ def gmean_norm(data): if __name__ == '__main__': np.set_printoptions(suppress=True, precision=4) - normal_fname = "Phantom_EPI_3mm_tra_SENSE_6_1.PAR" + normal_fname = 'Phantom_EPI_3mm_tra_SENSE_6_1.PAR' normal_img = parrec.load(normal_fname) normal_data = normal_img.get_fdata() normal_normed = gmean_norm(normal_data) - print(f"RMS of standard image {normal_fname:<44}: {np.sqrt(np.sum(normal_normed ** 2))}") + print(f'RMS of standard image {normal_fname:<44}: {np.sqrt(np.sum(normal_normed ** 2))}') - for parfile in glob.glob("*.PAR"): + for parfile in glob.glob('*.PAR'): if parfile == normal_fname: continue funny_img = parrec.load(parfile) diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index 477e687224..2d736fb445 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -1,4 +1,4 @@ -""" Generate mask and testing tractogram in known formats: +"""Generate mask and testing tractogram in known formats: * mask: standard.nii.gz * tractogram: @@ -13,7 +13,7 @@ def mark_the_spot(mask): - """ Marks every nonzero voxel using streamlines to form a 3D 'X' inside. + """Marks every nonzero voxel using streamlines to form a 3D 'X' inside. Generates streamlines forming a 3D 'X' inside every nonzero voxel. @@ -27,6 +27,7 @@ def mark_the_spot(mask): list of ndarrays All streamlines needed to mark every nonzero voxel in the `mask`. 
""" + def _gen_straight_streamline(start, end, steps=3): coords = [] for s, e in zip(start, end): @@ -57,11 +58,11 @@ def _gen_straight_streamline(start, end, steps=3): height = 5 # Sagittal depth = 7 # Axial - voxel_size = np.array((1., 3., 2.)) + voxel_size = np.array((1.0, 3.0, 2.0)) # Generate a random mask with voxel order RAS+. mask = rng.rand(width, height, depth) > 0.8 - mask = (255*mask).astype(np.uint8) + mask = (255 * mask).astype(np.uint8) # Build tractogram streamlines = mark_the_spot(mask) @@ -70,16 +71,18 @@ def _gen_straight_streamline(start, end, steps=3): # Build header affine = np.eye(4) affine[range(3), range(3)] = voxel_size - header = {Field.DIMENSIONS: (width, height, depth), - Field.VOXEL_SIZES: voxel_size, - Field.VOXEL_TO_RASMM: affine, - Field.VOXEL_ORDER: 'RAS'} + header = { + Field.DIMENSIONS: (width, height, depth), + Field.VOXEL_SIZES: voxel_size, + Field.VOXEL_TO_RASMM: affine, + Field.VOXEL_ORDER: 'RAS', + } # Save the standard mask. nii = nib.Nifti1Image(mask, affine=affine) - nib.save(nii, "standard.nii.gz") + nib.save(nii, 'standard.nii.gz') # Save the standard tractogram in every available file format. for ext, cls in FORMATS.items(): tfile = cls(tractogram, header) - nib.streamlines.save(tfile, "standard" + ext) + nib.streamlines.save(tfile, 'standard' + ext) diff --git a/nibabel/tests/data/make_moved_anat.py b/nibabel/tests/data/make_moved_anat.py index ec0817885c..aee20eda97 100644 --- a/nibabel/tests/data/make_moved_anat.py +++ b/nibabel/tests/data/make_moved_anat.py @@ -1,4 +1,4 @@ -""" Make anatomical image with altered affine +"""Make anatomical image with altered affine * Add some rotations and translations to affine; * Save as ``.nii`` file so SPM can read it. @@ -16,8 +16,6 @@ img = nib.load('anatomical.nii') some_rotations = euler2mat(0.1, 0.2, 0.3) extra_affine = from_matvec(some_rotations, [3, 4, 5]) - moved_anat = nib.Nifti1Image(img.dataobj, - extra_affine.dot(img.affine), - img.header) + moved_anat = nib.Nifti1Image(img.dataobj, extra_affine.dot(img.affine), img.header) moved_anat.set_data_dtype(np.float32) nib.save(moved_anat, 'anat_moved.nii') diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 663d7845a8..06e5540674 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -1,4 +1,4 @@ -""" Functions / decorators for finding / requiring nibabel-data directory +"""Functions / decorators for finding / requiring nibabel-data directory """ from os import environ, listdir @@ -8,7 +8,7 @@ def get_nibabel_data(): - """ Return path to nibabel-data or empty string if missing + """Return path to nibabel-data or empty string if missing First use ``NIBABEL_DATA_DIR`` environment variable. 
@@ -24,7 +24,7 @@ def get_nibabel_data(): def needs_nibabel_data(subdir=None): - """ Decorator for tests needing nibabel-data + """Decorator for tests needing nibabel-data Parameters ---------- @@ -39,11 +39,10 @@ def needs_nibabel_data(subdir=None): """ nibabel_data = get_nibabel_data() if nibabel_data == '': - return unittest.skip("Need nibabel-data directory for this test") + return unittest.skip('Need nibabel-data directory for this test') if subdir is None: return lambda x: x required_path = pjoin(nibabel_data, subdir) # Path should not be empty (as is the case for not-updated submodules) have_files = exists(required_path) and len(listdir(required_path)) > 0 - return unittest.skipUnless(have_files, - f"Need files in {required_path} for these tests") + return unittest.skipUnless(have_files, f'Need files in {required_path} for these tests') diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 1bffd01929..474eeceb2c 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -1,4 +1,4 @@ -""" Module to help tests check script output +"""Module to help tests check script output Provides class to be instantiated in tests that check scripts. Usually works something like this in a test module:: @@ -14,7 +14,7 @@ """ import sys import os -from os.path import (dirname, join as pjoin, isfile, isdir, realpath, pathsep) +from os.path import dirname, join as pjoin, isfile, isdir, realpath, pathsep from subprocess import Popen, PIPE @@ -23,8 +23,7 @@ def local_script_dir(script_sdir): - """ Get local script directory if running in development dir, else None - """ + """Get local script directory if running in development dir, else None""" # Check for presence of scripts in development directory. ``realpath`` # allows for the situation where the development directory has been linked # into the path. @@ -37,8 +36,7 @@ def local_script_dir(script_sdir): def local_module_dir(module_name): - """ Get local module directory if running in development dir, else None - """ + """Get local module directory if running in development dir, else None""" mod = __import__(module_name) containing_path = dirname(dirname(realpath(mod.__file__))) if containing_path == realpath(os.getcwd()): @@ -47,19 +45,20 @@ def local_module_dir(module_name): class ScriptRunner: - """ Class to run scripts and return output + """Class to run scripts and return output Finds local scripts and local modules if running in the development directory, otherwise finds system scripts and modules. """ - def __init__(self, - script_sdir='scripts', - module_sdir=MY_PACKAGE, - debug_print_var=None, - output_processor=lambda x: x - ): - """ Init ScriptRunner instance + def __init__( + self, + script_sdir='scripts', + module_sdir=MY_PACKAGE, + debug_print_var=None, + output_processor=lambda x: x, + ): + """Init ScriptRunner instance Parameters ---------- @@ -85,7 +84,7 @@ def __init__(self, self.output_processor = output_processor def run_command(self, cmd, check_code=True): - """ Run command sequence `cmd` returning exit code, stdout, stderr + """Run command sequence `cmd` returning exit code, stdout, stderr Parameters ---------- @@ -113,8 +112,7 @@ def run_command(self, cmd, check_code=True): # Unix, we might have the wrong incantation for the Python interpreter # in the hash bang first line in the source file. 
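# Editor's note (sketch, not part of the patch): typical use of ScriptRunner
# in a test module. This assumes nibabel's nib-ls command-line script is
# available; run_command returns (returncode, stdout, stderr).
from nibabel.tests.scriptrunner import ScriptRunner

runner = ScriptRunner()
code, stdout, stderr = runner.run_command(['nib-ls', '--help'])
assert code == 0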
So, either way, run # the script through the Python interpreter - cmd = [sys.executable, - pjoin(self.local_script_dir, cmd[0])] + cmd[1:] + cmd = [sys.executable, pjoin(self.local_script_dir, cmd[0])] + cmd[1:] if os.name == 'nt': # Quote any arguments with spaces. The quotes delimit the arguments # on Windows, and the arguments might be file paths with spaces. @@ -146,6 +144,7 @@ def run_command(self, cmd, check_code=True): stderr ------ {stderr} - """) + """ + ) opp = self.output_processor return proc.returncode, opp(stdout), opp(stderr) diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 0209ff3e69..08166df6e8 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -6,14 +6,22 @@ import numpy as np from ..eulerangles import euler2mat -from ..affines import (AffineError, apply_affine, append_diag, to_matvec, - from_matvec, dot_reduce, voxel_sizes, obliquity, rescale_affine) +from ..affines import ( + AffineError, + apply_affine, + append_diag, + to_matvec, + from_matvec, + dot_reduce, + voxel_sizes, + obliquity, + rescale_affine, +) from ..orientations import aff2axcodes import pytest -from numpy.testing import assert_array_equal, assert_almost_equal, \ - assert_array_almost_equal +from numpy.testing import assert_array_equal, assert_almost_equal, assert_array_almost_equal def validated_apply_affine(T, xyz): @@ -33,11 +41,9 @@ def test_apply_affine(): rng = np.random.RandomState(20110903) aff = np.diag([2, 3, 4, 1]) pts = rng.uniform(size=(4, 3)) - assert_array_equal(apply_affine(aff, pts), - pts * [[2, 3, 4]]) + assert_array_equal(apply_affine(aff, pts), pts * [[2, 3, 4]]) aff[:3, 3] = [10, 11, 12] - assert_array_equal(apply_affine(aff, pts), - pts * [[2, 3, 4]] + [[10, 11, 12]]) + assert_array_equal(apply_affine(aff, pts), pts * [[2, 3, 4]] + [[10, 11, 12]]) aff[:3, :] = rng.normal(size=(3, 4)) exp_res = np.concatenate((pts.T, np.ones((1, 4))), axis=0) exp_res = np.dot(aff, exp_res)[:3, :].T @@ -103,35 +109,29 @@ def test_matrix_vector(): def test_append_diag(): # Routine for appending diagonal elements - assert_array_equal(append_diag(np.diag([2, 3, 1]), [1]), - np.diag([2, 3, 1, 1])) - assert_array_equal(append_diag(np.diag([2, 3, 1]), [1, 1]), - np.diag([2, 3, 1, 1, 1])) - aff = np.array([[2, 0, 0], - [0, 3, 0], - [0, 0, 1], - [0, 0, 1]]) - assert_array_equal(append_diag(aff, [5], [9]), - [[2, 0, 0, 0], - [0, 3, 0, 0], - [0, 0, 0, 1], - [0, 0, 5, 9], - [0, 0, 0, 1]]) - assert_array_equal(append_diag(aff, [5, 6], [9, 10]), - [[2, 0, 0, 0, 0], - [0, 3, 0, 0, 0], - [0, 0, 0, 0, 1], - [0, 0, 5, 0, 9], - [0, 0, 0, 6, 10], - [0, 0, 0, 0, 1]]) - aff = np.array([[2, 0, 0, 0], - [0, 3, 0, 0], - [0, 0, 0, 1]]) - assert_array_equal(append_diag(aff, [5], [9]), - [[2, 0, 0, 0, 0], - [0, 3, 0, 0, 0], - [0, 0, 0, 5, 9], - [0, 0, 0, 0, 1]]) + assert_array_equal(append_diag(np.diag([2, 3, 1]), [1]), np.diag([2, 3, 1, 1])) + assert_array_equal(append_diag(np.diag([2, 3, 1]), [1, 1]), np.diag([2, 3, 1, 1, 1])) + aff = np.array([[2, 0, 0], [0, 3, 0], [0, 0, 1], [0, 0, 1]]) + assert_array_equal( + append_diag(aff, [5], [9]), + [[2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 1], [0, 0, 5, 9], [0, 0, 0, 1]], + ) + assert_array_equal( + append_diag(aff, [5, 6], [9, 10]), + [ + [2, 0, 0, 0, 0], + [0, 3, 0, 0, 0], + [0, 0, 0, 0, 1], + [0, 0, 5, 0, 9], + [0, 0, 0, 6, 10], + [0, 0, 0, 0, 1], + ], + ) + aff = np.array([[2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 1]]) + assert_array_equal( + append_diag(aff, [5], [9]), + [[2, 0, 0, 0, 0], [0, 3, 0, 0, 0], [0, 0, 0, 5, 9], 
[0, 0, 0, 0, 1]], + ) # Length of starts has to match length of steps with pytest.raises(AffineError): append_diag(aff, [5, 6], [9]) @@ -152,10 +152,15 @@ def test_dot_reduce(): assert_array_equal(dot_reduce(vec, mat), np.dot(vec, mat)) assert_array_equal(dot_reduce(mat, vec), np.dot(mat, vec)) mat2 = np.arange(13, 22).reshape((3, 3)) - assert_array_equal(dot_reduce(mat2, vec, mat), - np.dot(mat2, np.dot(vec, mat))) - assert_array_equal(dot_reduce(mat, vec, mat2, ), - np.dot(mat, np.dot(vec, mat2))) + assert_array_equal(dot_reduce(mat2, vec, mat), np.dot(mat2, np.dot(vec, mat))) + assert_array_equal( + dot_reduce( + mat, + vec, + mat2, + ), + np.dot(mat, np.dot(vec, mat2)), + ) def test_voxel_sizes(): @@ -177,8 +182,7 @@ def test_voxel_sizes(): new_row = np.vstack((np.zeros(n + 1), aff)) assert_almost_equal(voxel_sizes(new_row), vox_sizes) new_col = np.c_[np.zeros(n + 1), aff] - assert_almost_equal(voxel_sizes(new_col), - [0] + list(vox_sizes)) + assert_almost_equal(voxel_sizes(new_col), [0] + list(vox_sizes)) if n < 3: continue # Rotations do not change the voxel size @@ -192,13 +196,13 @@ def test_voxel_sizes(): def test_obliquity(): """Check the calculation of inclination of an affine axes.""" from math import pi + aligned = np.diag([2.0, 2.0, 2.3, 1.0]) aligned[:-1, -1] = [-10, -10, -7] R = from_matvec(euler2mat(x=0.09, y=0.001, z=0.001), [0.0, 0.0, 0.0]) oblique = R.dot(aligned) assert_almost_equal(obliquity(aligned), [0.0, 0.0, 0.0]) - assert_almost_equal(obliquity(oblique) * 180 / pi, - [0.0810285, 5.1569949, 5.1569376]) + assert_almost_equal(obliquity(oblique) * 180 / pi, [0.0810285, 5.1569949, 5.1569376]) def test_rescale_affine(): diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 7f32e2d8a7..2cea69413f 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test Analyze headers +"""Test Analyze headers See test_wrapstruct.py for tests of the wrapped structarr-ness of the Analyze header @@ -21,8 +21,7 @@ import numpy as np from io import BytesIO, StringIO -from ..spatialimages import (HeaderDataError, HeaderTypeError, - supported_np_types) +from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types from ..analyze import AnalyzeHeader, AnalyzeImage from ..nifti1 import Nifti1Header from ..loadsave import read_img_data @@ -33,15 +32,20 @@ from ..optpkg import optional_package import pytest -from numpy.testing import (assert_array_equal, assert_array_almost_equal) +from numpy.testing import assert_array_equal, assert_array_almost_equal -from ..testing import (data_path, suppress_warnings, assert_dt_equal, - bytesio_filemap, bytesio_round_trip) +from ..testing import ( + data_path, + suppress_warnings, + assert_dt_equal, + bytesio_filemap, + bytesio_round_trip, +) from . import test_wrapstruct as tws from . 
import test_spatialimages as tsi -HAVE_ZSTD = optional_package("pyzstd")[1] +HAVE_ZSTD = optional_package('pyzstd')[1] header_file = os.path.join(data_path, 'analyze.hdr') @@ -60,12 +64,7 @@ class TestAnalyzeHeader(tws._TestLabeledWrapStruct): header_class = AnalyzeHeader example_file = header_file sizeof_hdr = AnalyzeHeader.sizeof_hdr - supported_np_types = set((np.uint8, - np.int16, - np.int32, - np.float32, - np.float64, - np.complex64)) + supported_np_types = set((np.uint8, np.int16, np.int32, np.float32, np.float64, np.complex64)) add_intp(supported_np_types) def test_supported_types(self): @@ -88,8 +87,7 @@ def test_general_init(self): # is True (which it is by default). We have to be careful of the # translations though - these arise from SPM's use of the origin # field, and the center of the image. - assert_array_equal(np.diag(hdr.get_base_affine()), - [-1, 1, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [-1, 1, 1, 1]) # But zooms only go with number of dimensions assert hdr.get_zooms() == (1.0,) @@ -144,8 +142,10 @@ def test_log_checks(self): fhdr, message, raiser = self.log_chk(hdr, 30) assert fhdr['sizeof_hdr'] == self.sizeof_hdr - assert (message == f'sizeof_hdr should be {self.sizeof_hdr}; ' - f'set sizeof_hdr to {self.sizeof_hdr}') + assert ( + message == f'sizeof_hdr should be {self.sizeof_hdr}; ' + f'set sizeof_hdr to {self.sizeof_hdr}' + ) pytest.raises(*raiser) # RGB datatype does not raise error hdr = HC() @@ -195,8 +195,10 @@ def test_pixdim_log_checks(self): fhdr, message, raiser = self.log_chk(hdr, 35) assert fhdr['pixdim'][1] == 1 assert fhdr['pixdim'][2] == 2 - assert message == ('pixdim[1,2,3] should be non-zero and pixdim[1,2,3] should be ' - 'positive; setting 0 dims to 1 and setting to abs of pixdim values') + assert message == ( + 'pixdim[1,2,3] should be non-zero and pixdim[1,2,3] should be ' + 'positive; setting 0 dims to 1 and setting to abs of pixdim values' + ) pytest.raises(*raiser) def test_no_scaling_fixes(self): @@ -238,8 +240,9 @@ def test_logger_error(self): # Check log message appears in new logger imageglobals.logger = logger hdr.copy().check_fix() - assert str_io.getvalue() == ('bitpix does not match datatype; ' - 'setting bitpix to match datatype\n') + assert str_io.getvalue() == ( + 'bitpix does not match datatype; ' 'setting bitpix to match datatype\n' + ) # Check that error_level in fact causes error to be raised imageglobals.error_level = 10 with pytest.raises(HeaderDataError): @@ -250,15 +253,15 @@ def test_logger_error(self): def test_data_dtype(self): # check getting and setting of data type # codes / types supported by all binary headers - all_supported_types = ((2, np.uint8), - (4, np.int16), - (8, np.int32), - (16, np.float32), - (32, np.complex64), - (64, np.float64), - (128, np.dtype([('R', 'u1'), - ('G', 'u1'), - ('B', 'u1')]))) + all_supported_types = ( + (2, np.uint8), + (4, np.int16), + (8, np.int32), + (16, np.float32), + (32, np.complex64), + (64, np.float64), + (128, np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')])), + ) # and unsupported - here using some labels instead all_unsupported_types = (np.void, 'none', 'all', 0) @@ -266,6 +269,7 @@ def assert_set_dtype(dt_spec, np_dtype): hdr = self.header_class() hdr.set_data_dtype(dt_spec) assert_dt_equal(hdr.get_data_dtype(), np_dtype) + # Test code, type known to be supported by all types for code, npt in all_supported_types: # Can set with code value @@ -461,23 +465,19 @@ def test_data_shape_zooms_affine(self): hdr.set_data_shape((1, 2, 3)) 
assert_array_equal(hdr.get_zooms(), (4, 5, 1)) # Setting zooms changes affine - assert_array_equal(np.diag(hdr.get_base_affine()), - [-4, 5, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [-4, 5, 1, 1]) hdr.set_zooms((1, 1, 1)) - assert_array_equal(np.diag(hdr.get_base_affine()), - [-1, 1, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [-1, 1, 1, 1]) def test_default_x_flip(self): hdr = self.header_class() hdr.default_x_flip = True hdr.set_data_shape((1, 2, 3)) hdr.set_zooms((1, 1, 1)) - assert_array_equal(np.diag(hdr.get_base_affine()), - [-1, 1, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [-1, 1, 1, 1]) hdr.default_x_flip = False # Check avoids translations - assert_array_equal(np.diag(hdr.get_base_affine()), - [1, 1, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [1, 1, 1, 1]) def test_from_eg_file(self): fileobj = open(self.example_file, 'rb') @@ -524,12 +524,15 @@ def test_from_header(self): assert hdr is not copy class C: + def get_data_dtype(self): + return np.dtype('i2') - def get_data_dtype(self): return np.dtype('i2') + def get_data_shape(self): + return (5, 4, 3) - def get_data_shape(self): return (5, 4, 3) + def get_zooms(self): + return (10.0, 9.0, 8.0) - def get_zooms(self): return (10.0, 9.0, 8.0) converted = klass.from_header(C()) assert isinstance(converted, klass) assert converted.get_data_dtype() == np.dtype('i2') @@ -544,24 +547,33 @@ def test_base_affine(self): assert hdr.default_x_flip assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr.set_data_shape((3, 5)) assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -0.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -0.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr.set_data_shape((3, 5, 7)) assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) def test_scaling(self): # Test integer scaling from float @@ -598,15 +610,17 @@ def test_scaling(self): def test_slope_inter(self): hdr = self.header_class() assert hdr.get_slope_inter() == (None, None) - for slinter in ((None,), - (None, None), - (np.nan, np.nan), - (np.nan, None), - (None, np.nan), - (1.0,), - (1.0, None), - (None, 0), - (1.0, 0)): + for slinter in ( + (None,), + (None, None), + (np.nan, np.nan), + (np.nan, None), + (None, np.nan), + (1.0,), + (1.0, None), + (None, 0), + (1.0, 0), + ): hdr.set_slope_inter(*slinter) assert hdr.get_slope_inter() == (None, None) with pytest.raises(HeaderTypeError): @@ -621,27 +635,28 @@ def test_from_analyze_map(self): class H1: pass + with pytest.raises(AttributeError): klass.from_header(H1()) class H2: - def get_data_dtype(self): return np.dtype('u1') + with pytest.raises(AttributeError): klass.from_header(H2()) class H3(H2): - def get_data_shape(self): return (2, 3, 4) + with pytest.raises(AttributeError): klass.from_header(H3()) class H4(H3): - def get_zooms(self): - return 4., 5., 6. 
+ return 4.0, 5.0, 6.0 + exp_hdr = klass() exp_hdr.set_data_dtype(np.dtype('u1')) exp_hdr.set_data_shape((2, 3, 4)) @@ -650,30 +665,29 @@ def get_zooms(self): # cal_max, cal_min get properly set from ``as_analyze_map`` class H5(H4): - def as_analyze_map(self): return dict(cal_min=-100, cal_max=100) + exp_hdr['cal_min'] = -100 exp_hdr['cal_max'] = 100 assert klass.from_header(H5()) == exp_hdr # set_* methods override fields from header class H6(H5): - def as_analyze_map(self): - return dict(datatype=4, bitpix=32, - cal_min=-100, cal_max=100) + return dict(datatype=4, bitpix=32, cal_min=-100, cal_max=100) + assert klass.from_header(H6()) == exp_hdr # Any mapping will do, including a Nifti header class H7(H5): - def as_analyze_map(self): n_hdr = Nifti1Header() n_hdr.set_data_dtype(np.dtype('i2')) n_hdr['cal_min'] = -100 n_hdr['cal_max'] = 100 return n_hdr + # Values from methods still override values from header (shape, dtype, # zooms still at defaults from n_hdr header fields above) assert klass.from_header(H7()) == exp_hdr diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 54c1c0fd95..2382847da4 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -1,16 +1,17 @@ -""" Metaclass and class for validating instance APIs +"""Metaclass and class for validating instance APIs """ import os import pytest class validator2test(type): - """ Wrap ``validator_*`` methods with test method testing instances + """Wrap ``validator_*`` methods with test method testing instances * Find methods with names starting with 'validate_' * Create test method with same name * Test method iterates, running validate method over all obj, param pairs """ + def __new__(mcs, name, bases, dict): klass = type.__new__(mcs, name, bases, dict) @@ -18,11 +19,13 @@ def make_test(name, validator): def meth(self): for imaker, params in self.obj_params(): validator(self, imaker, params) - meth.__name__ = 'test_' + name[len('validate_'):] + + meth.__name__ = 'test_' + name[len('validate_') :] meth.__doc__ = f'autogenerated test from {klass.__name__}.{name}' if hasattr(validator, 'pytestmark'): meth.pytestmark = validator.pytestmark return meth + for name in dir(klass): if not name.startswith('validate_'): continue @@ -33,7 +36,7 @@ def meth(self): class ValidateAPI(metaclass=validator2test): - """ A class to validate APIs + """A class to validate APIs Your job is twofold: @@ -53,10 +56,10 @@ class ValidateAPI(metaclass=validator2test): class TestValidateSomething(ValidateAPI): - """ Example implementing an API validator test class """ + """Example implementing an API validator test class""" def obj_params(self): - """ Iterator returning (obj, params) pairs + """Iterator returning (obj, params) pairs ``obj`` is some instance for which we want to check the API. @@ -64,8 +67,8 @@ def obj_params(self): against ``obj``. See the :meth:`validate_something` method for an example. 
""" - class C: + class C: def __init__(self, var): self.var = var @@ -76,7 +79,7 @@ def get_var(self): yield C('easypeasy'), {'var': 'easypeasy'} def validate_something(self, obj, params): - """ Do some checks of the `obj` API against `params` + """Do some checks of the `obj` API against `params` The metaclass sets up a ``test_something`` function that runs these checks on each ( @@ -86,14 +89,15 @@ def validate_something(self, obj, params): @pytest.mark.xfail( - os.getenv("PYTEST_XDIST_WORKER") is not None, - reason="Execution in the same scope cannot be guaranteed" + os.getenv('PYTEST_XDIST_WORKER') is not None, + reason='Execution in the same scope cannot be guaranteed', ) class TestRunAllTests(ValidateAPI): - """ Class to test that each validator test gets run + """Class to test that each validator test gets run We check this in the module teardown function """ + run_tests = [] def obj_params(self): diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index eb296b516f..e4d16e7dd8 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for arrayproxy module +"""Tests for arrayproxy module """ import warnings @@ -21,7 +21,7 @@ import numpy as np from .. import __version__ -from ..arrayproxy import (ArrayProxy, is_proxy, reshape_dataobj, get_obj_dtype) +from ..arrayproxy import ArrayProxy, is_proxy, reshape_dataobj, get_obj_dtype from ..openers import ImageOpener from ..nifti1 import Nifti1Header from ..deprecator import ExpiredDeprecationError @@ -37,7 +37,6 @@ class FunkyHeader: - def __init__(self, shape): self.shape = shape @@ -114,18 +113,14 @@ def test_tuplespec(): bio.write(arr.tobytes(order='F')) # Create equivalent header and tuple specs hdr = FunkyHeader(shape) - tuple_spec = (hdr.get_data_shape(), hdr.get_data_dtype(), - hdr.get_data_offset(), 1., 0.) 
+ tuple_spec = (hdr.get_data_shape(), hdr.get_data_dtype(), hdr.get_data_offset(), 1.0, 0.0) ap_header = ArrayProxy(bio, hdr) ap_tuple = ArrayProxy(bio, tuple_spec) # Header and tuple specs produce identical behavior for prop in ('shape', 'dtype', 'offset', 'slope', 'inter', 'is_proxy'): assert getattr(ap_header, prop) == getattr(ap_tuple, prop) - for method, args in (('get_unscaled', ()), ('__array__', ()), - ('__getitem__', ((0, 2, 1), )) - ): - assert_array_equal(getattr(ap_header, method)(*args), - getattr(ap_tuple, method)(*args)) + for method, args in (('get_unscaled', ()), ('__array__', ()), ('__getitem__', ((0, 2, 1),))): + assert_array_equal(getattr(ap_header, method)(*args), getattr(ap_tuple, method)(*args)) # Partial tuples of length 2-4 are also valid for n in range(2, 5): ArrayProxy(bio, tuple_spec[:n]) @@ -167,8 +162,8 @@ def test_nifti1_init(): assert_array_equal(np.asarray(ap), arr * 2.0 + 10) -@pytest.mark.parametrize("n_dim", (1, 2, 3)) -@pytest.mark.parametrize("offset", (0, 20)) +@pytest.mark.parametrize('n_dim', (1, 2, 3)) +@pytest.mark.parametrize('offset', (0, 20)) def test_proxy_slicing(n_dim, offset): shape = (15, 16, 17)[:n_dim] arr = np.arange(np.prod(shape)).reshape(shape) @@ -203,7 +198,7 @@ def test_proxy_slicing_with_scaling(): assert_array_equal(arr[sliceobj] * 2.0 + 1.0, prox[sliceobj]) -@pytest.mark.parametrize("order", ("C", "F")) +@pytest.mark.parametrize('order', ('C', 'F')) def test_order_override(order): shape = (15, 16, 17) arr = np.arange(np.prod(shape)).reshape(shape) @@ -260,6 +255,7 @@ def test_is_proxy(): class NP: is_proxy = False + assert not is_proxy(NP()) @@ -272,21 +268,17 @@ def test_reshape_dataobj(): arr = np.arange(np.prod(shape), dtype=prox.dtype).reshape(shape) bio.write(b'\x00' * prox.offset + arr.tobytes(order='F')) assert_array_equal(prox, arr) - assert_array_equal(reshape_dataobj(prox, (2, 3, 4)), - np.reshape(arr, (2, 3, 4))) + assert_array_equal(reshape_dataobj(prox, (2, 3, 4)), np.reshape(arr, (2, 3, 4))) assert prox.shape == shape assert arr.shape == shape - assert_array_equal(reshape_dataobj(arr, (2, 3, 4)), - np.reshape(arr, (2, 3, 4))) + assert_array_equal(reshape_dataobj(arr, (2, 3, 4)), np.reshape(arr, (2, 3, 4))) assert arr.shape == shape class ArrGiver: - def __array__(self): return arr - assert_array_equal(reshape_dataobj(ArrGiver(), (2, 3, 4)), - np.reshape(arr, (2, 3, 4))) + assert_array_equal(reshape_dataobj(ArrGiver(), (2, 3, 4)), np.reshape(arr, (2, 3, 4))) assert arr.shape == shape @@ -326,15 +318,16 @@ def test_get_obj_dtype(): class ArrGiver: def __array__(self): return arr + assert get_obj_dtype(ArrGiver()) == np.dtype('int16') def test_get_unscaled(): # Test fetch of raw array class FunkyHeader2(FunkyHeader): - def get_slope_inter(self): return 2.1, 3.14 + shape = (2, 3, 4) hdr = FunkyHeader2(shape) bio = BytesIO() @@ -354,10 +347,8 @@ def test_mmap(): check_mmap(hdr, hdr.get_data_offset(), ArrayProxy) -def check_mmap(hdr, offset, proxy_class, - has_scaling=False, - unscaled_is_view=True): - """ Assert that array proxies return memory maps as expected +def check_mmap(hdr, offset, proxy_class, has_scaling=False, unscaled_is_view=True): + """Assert that array proxies return memory maps as expected Parameters ---------- @@ -392,14 +383,15 @@ def check_mmap(hdr, offset, proxy_class, fobj.write(b' ' * offset) fobj.write(arr.tobytes(order='F')) for mmap, expected_mode in ( - # mmap value, expected memmap mode - # mmap=None -> no mmap value - # expected mode=None -> no memmap returned - (None, 'c'), - (True, 'c'), - 
('c', 'c'), - ('r', 'r'), - (False, None)): + # mmap value, expected memmap mode + # mmap=None -> no mmap value + # expected mode=None -> no memmap returned + (None, 'c'), + (True, 'c'), + ('c', 'c'), + ('r', 'r'), + (False, None), + ): kwargs = {} if mmap is not None: kwargs['mmap'] = mmap @@ -407,7 +399,7 @@ def check_mmap(hdr, offset, proxy_class, unscaled = prox.get_unscaled() back_data = np.asanyarray(prox) unscaled_is_mmap = isinstance(unscaled, np.memmap) - back_is_mmap = isinstance(back_data, np.memmap) + back_is_mmap = isinstance(back_data, np.memmap) if expected_mode is None: assert not unscaled_is_mmap assert not back_is_mmap @@ -431,6 +423,7 @@ def check_mmap(hdr, offset, proxy_class, # created class CountingImageOpener(ImageOpener): num_openers = 0 + def __init__(self, *args, **kwargs): super(CountingImageOpener, self).__init__(*args, **kwargs) CountingImageOpener.num_openers += 1 @@ -472,32 +465,32 @@ def test_keep_file_open_true_false_invalid(): # - expected value for internal ArrayProxy._keep_file_open flag tests = [ # open file handle - kfo and have_igzip are both irrelevant - ('open', False, False, False, False), - ('open', False, True, False, False), - ('open', True, False, False, False), - ('open', True, True, False, False), + ('open', False, False, False, False), + ('open', False, True, False, False), + ('open', True, False, False, False), + ('open', True, True, False, False), # non-gzip file - have_igzip is irrelevant, decision should be made # solely from kfo flag - ('bin', False, False, False, False), - ('bin', False, True, False, False), - ('bin', True, False, True, True), - ('bin', True, True, True, True), + ('bin', False, False, False, False), + ('bin', False, True, False, False), + ('bin', True, False, True, True), + ('bin', True, True, True, True), # gzip file. If igzip is present, we persist the ImageOpener. 
- ('gz', False, False, False, False), - ('gz', False, True, True, False), - ('gz', True, False, True, True), - ('gz', True, True, True, True), - ] + ('gz', False, False, False, False), + ('gz', False, True, True, False), + ('gz', True, False, True, True), + ('gz', True, True, True, True), + ] dtype = np.float32 - data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) + data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) voxels = np.random.randint(0, 10, (10, 3)) for test in tests: filetype, kfo, have_igzip, exp_persist, exp_kfo = test - with InTemporaryDirectory(), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener), \ - patch_indexed_gzip(have_igzip): + with InTemporaryDirectory(), mock.patch( + 'nibabel.openers.ImageOpener', CountingImageOpener + ), patch_indexed_gzip(have_igzip): fname = f'testdata.{filetype}' # create the test data file if filetype == 'gz': @@ -516,8 +509,7 @@ def test_keep_file_open_true_false_invalid(): fobj1 = fname fobj2 = fname try: - proxy = ArrayProxy(fobj1, ((10, 10, 10), dtype), - keep_file_open=kfo) + proxy = ArrayProxy(fobj1, ((10, 10, 10), dtype), keep_file_open=kfo) # We also test that we get the same behaviour when the # KEEP_FILE_OPEN_DEFAULT flag is changed with patch_keep_file_open_default(kfo): @@ -560,8 +552,7 @@ def test_keep_file_open_true_false_invalid(): for invalid_kfo in (55, 'auto', 'cauto'): with pytest.raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open=invalid_kfo) + ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=invalid_kfo) with patch_keep_file_open_default(invalid_kfo): with pytest.raises(ValueError): ArrayProxy(fname, ((10, 10, 10), dtype)) diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index de55cd334b..1fbaa38916 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -1,4 +1,4 @@ -""" Testing array writer objects +"""Testing array writer objects See docstring of :mod:`nibabel.arraywriters` for API. 
""" @@ -8,9 +8,15 @@ import numpy as np from io import BytesIO -from ..arraywriters import (SlopeInterArrayWriter, SlopeArrayWriter, - WriterError, ScalingError, ArrayWriter, - make_array_writer, get_slope_inter) +from ..arraywriters import ( + SlopeInterArrayWriter, + SlopeArrayWriter, + WriterError, + ScalingError, + ArrayWriter, + make_array_writer, + get_slope_inter, +) from ..casting import int_abs, type_info, shared_range, on_powerpc from ..volumeutils import array_from_file, apply_read_scaling, _dt_min_max @@ -101,13 +107,11 @@ def test_arraywriter_check_scaling(): def test_no_scaling(): # Test arraywriter when writing different types without scaling for in_dtype, out_dtype, awt in itertools.product( - NUMERIC_TYPES, - NUMERIC_TYPES, - (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): + NUMERIC_TYPES, NUMERIC_TYPES, (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter) + ): mn_in, mx_in = _dt_min_max(in_dtype) arr = np.array([mn_in, 0, 1, mx_in], dtype=in_dtype) - kwargs = (dict(check_scaling=False) if awt == ArrayWriter - else dict(calc_scale=False)) + kwargs = dict(check_scaling=False) if awt == ArrayWriter else dict(calc_scale=False) aw = awt(arr, out_dtype, **kwargs) with suppress_warnings(): back_arr = round_trip(aw) @@ -127,8 +131,7 @@ def test_no_scaling(): exp_back = np.clip(exp_back, 0, 1) else: # Clip to shared range of working precision - exp_back = np.clip(exp_back, - *shared_range(float, out_dtype)) + exp_back = np.clip(exp_back, *shared_range(float, out_dtype)) else: # iu input and output type # No scaling, never gets converted to float. # Does get clipped to range of output type @@ -136,9 +139,7 @@ def test_no_scaling(): if (mn_in, mx_in) != (mn_out, mx_out): # Use smaller of input, output range to avoid np.clip # upcasting the array because of large clip limits. 
- exp_back = np.clip(exp_back, - max(mn_in, mn_out), - min(mx_in, mx_out)) + exp_back = np.clip(exp_back, max(mn_in, mn_out), min(mx_in, mx_out)) elif in_dtype in COMPLEX_TYPES: # always cast to real from complex with suppress_warnings(): @@ -244,16 +245,14 @@ def test_special_rt(): ArrayWriter(in_arr, out_dtt) aw = ArrayWriter(in_arr, out_dtt, check_scaling=False) mn, mx = shared_range(float, out_dtt) - assert np.allclose(round_trip(aw).astype(float), - [mx, 0, mn]) + assert np.allclose(round_trip(aw).astype(float), [mx, 0, mn]) for klass in (SlopeArrayWriter, SlopeInterArrayWriter): aw = klass(in_arr, out_dtt) assert get_slope_inter(aw) == (1, 0) assert_array_equal(round_trip(aw), 0) for in_dtt, out_dtt, awt in itertools.product( - FLOAT_TYPES, - IUINT_TYPES, - (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): + FLOAT_TYPES, IUINT_TYPES, (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter) + ): arr = np.zeros((3,), dtype=in_dtt) aw = awt(arr, out_dtt) assert get_slope_inter(aw) == (1, 0) @@ -365,8 +364,10 @@ def test_calculate_scale(): def test_resets(): # Test reset of values, caching of scales - for klass, inp, outp in ((SlopeInterArrayWriter, (1, 511), (2.0, 1.0)), - (SlopeArrayWriter, (0, 510), (2.0, 0.0))): + for klass, inp, outp in ( + (SlopeInterArrayWriter, (1, 511), (2.0, 1.0)), + (SlopeArrayWriter, (0, 510), (2.0, 0.0)), + ): arr = np.array(inp) outp = np.array(outp) aw = klass(arr, np.uint8) @@ -390,13 +391,15 @@ def test_no_offset_scale(): # Specific tests of no-offset scaling SAW = SlopeArrayWriter # Floating point - for data in ((-128, 127), - (-128, 126), - (-128, -127), - (-128, 0), - (-128, -1), - (126, 127), - (-127, 127)): + for data in ( + (-128, 127), + (-128, 126), + (-128, -127), + (-128, 0), + (-128, -1), + (126, 127), + (-127, 127), + ): aw = SAW(np.array(data, dtype=np.float32), np.int8) assert aw.slope == 1.0 aw = SAW(np.array([-126, 127 * 2.0], dtype=np.float32), np.int8) @@ -404,7 +407,7 @@ def test_no_offset_scale(): aw = SAW(np.array([-128 * 2.0, 127], dtype=np.float32), np.int8) assert aw.slope == 2 # Test that nasty abs behavior does not upset us - n = -2**15 + n = -(2**15) aw = SAW(np.array([n, n], dtype=np.int16), np.uint8) assert_array_almost_equal(aw.slope, n / 255.0, 5) @@ -431,22 +434,17 @@ def test_io_scaling(): # and from float to integer. bio = BytesIO() for in_type, out_type in itertools.product( - (np.int16, np.uint16, np.float32), - (np.int8, np.uint8, np.int16, np.uint16)): + (np.int16, np.uint16, np.float32), (np.int8, np.uint8, np.int16, np.uint16) + ): out_dtype = np.dtype(out_type) info = type_info(in_type) imin, imax = info['min'], info['max'] if imin == 0: # unsigned int - val_tuples = ((0, imax), - (100, imax)) + val_tuples = ((0, imax), (100, imax)) else: - val_tuples = ((imin, 0, imax), - (imin, 0), - (0, imax), - (imin, 100, imax)) + val_tuples = ((imin, 0, imax), (imin, 0), (0, imax), (imin, 100, imax)) if imin != 0: - val_tuples += ((imin, 0), - (0, imax)) + val_tuples += ((imin, 0), (0, imax)) for vals in val_tuples: arr = np.array(vals, dtype=in_type) aw = SlopeInterArrayWriter(arr, out_dtype) @@ -455,7 +453,7 @@ def test_io_scaling(): arr3 = apply_read_scaling(arr2, aw.slope, aw.inter) # Max rounding error for integer type # Slope might be negative - max_miss = np.abs(aw.slope) / 2. 
+ max_miss = np.abs(aw.slope) / 2.0 abs_err = np.abs(arr - arr3) assert np.all(abs_err <= max_miss) if out_type in UINT_TYPES and 0 in (min(arr), max(arr)): @@ -471,16 +469,14 @@ def test_input_ranges(): bio = BytesIO() working_type = np.float32 work_eps = np.finfo(working_type).eps - for out_type, offset in itertools.product( - IUINT_TYPES, - range(-1000, 1000, 100)): + for out_type, offset in itertools.product(IUINT_TYPES, range(-1000, 1000, 100)): aw = SlopeInterArrayWriter(arr, out_type) aw.to_fileobj(bio) arr2 = array_from_file(arr.shape, out_type, bio) arr3 = apply_read_scaling(arr2, aw.slope, aw.inter) # Max rounding error for integer type # Slope might be negative - max_miss = np.abs(aw.slope) / working_type(2.) + work_eps * 10 + max_miss = np.abs(aw.slope) / working_type(2.0) + work_eps * 10 abs_err = np.abs(arr - arr3) max_err = np.abs(arr) * work_eps + max_miss assert np.all(abs_err <= max_err) @@ -496,10 +492,12 @@ def test_nan2zero(): # nan2zero as argument to `to_fileobj` deprecated, raises error if not the # same as input nan2zero - meaning that by default, nan2zero of False will # raise an error. - arr = np.array([np.nan, 99.], dtype=np.float32) - for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)), - (SlopeArrayWriter, dict(calc_scale=False)), - (SlopeInterArrayWriter, dict(calc_scale=False))): + arr = np.array([np.nan, 99.0], dtype=np.float32) + for awt, kwargs in ( + (ArrayWriter, dict(check_scaling=False)), + (SlopeArrayWriter, dict(calc_scale=False)), + (SlopeInterArrayWriter, dict(calc_scale=False)), + ): # nan2zero default is True # nan2zero ignored for floats aw = awt(arr, np.float32, **kwargs) @@ -527,8 +525,7 @@ def test_byte_orders(): dt = np.dtype(tp) for code in '<>': ndt = dt.newbyteorder(code) - for klass in (SlopeInterArrayWriter, SlopeArrayWriter, - ArrayWriter): + for klass in (SlopeInterArrayWriter, SlopeArrayWriter, ArrayWriter): aw = klass(arr, ndt) data_back = round_trip(aw) assert_array_almost_equal(arr, data_back) @@ -568,8 +565,7 @@ def test_to_float(): arr[-1] = mx for out_type in CFLOAT_TYPES: out_info = type_info(out_type) - for klass in (SlopeInterArrayWriter, SlopeArrayWriter, - ArrayWriter): + for klass in (SlopeInterArrayWriter, SlopeArrayWriter, ArrayWriter): if in_type in COMPLEX_TYPES and out_type in FLOAT_TYPES: with pytest.raises(WriterError): klass(arr, out_type) @@ -678,8 +674,7 @@ def test_int_int_slope(): if kinds in ('ii', 'uu', 'ui'): arrs = (np.array([iinf.min, iinf.max], dtype=in_dt),) elif kinds == 'iu': - arrs = (np.array([iinf.min, 0], dtype=in_dt), - np.array([0, iinf.max], dtype=in_dt)) + arrs = (np.array([iinf.min, 0], dtype=in_dt), np.array([0, iinf.max], dtype=in_dt)) for arr in arrs: try: aw = SlopeArrayWriter(arr, out_dt) @@ -696,17 +691,14 @@ def test_int_int_slope(): def test_float_int_spread(): # Test rounding error for spread of values powers = np.arange(-10, 10, 0.5) - arr = np.concatenate((-10**powers, 10**powers)) + arr = np.concatenate((-(10**powers), 10**powers)) for in_dt in (np.float32, np.float64): arr_t = arr.astype(in_dt) for out_dt in IUINT_TYPES: aw = SlopeInterArrayWriter(arr_t, out_dt) arr_back_sc = round_trip(aw) # Get estimate for error - max_miss = rt_err_estimate(arr_t, - arr_back_sc.dtype, - aw.slope, - aw.inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, aw.slope, aw.inter) # Simulate allclose test with large atol diff = np.abs(arr_t - arr_back_sc) rdiff = diff / np.abs(arr_t) @@ -717,7 +709,7 @@ def rt_err_estimate(arr_t, out_dtype, slope, inter): # Error attributable to 
rounding slope = 1 if slope is None else slope inter = 1 if inter is None else inter - max_int_miss = slope / 2. + max_int_miss = slope / 2.0 # Estimate error attributable to floating point slope / inter; # Remove inter / slope, put in a float type to simulate the type # promotion for the multiplication, apply slope / inter @@ -741,10 +733,7 @@ def test_rt_bias(): arr_back_sc = round_trip(aw) bias = np.mean(arr_t - arr_back_sc) # Get estimate for error - max_miss = rt_err_estimate(arr_t, - arr_back_sc.dtype, - aw.slope, - aw.inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, aw.slope, aw.inter) # Hokey use of max_miss as a std estimate bias_thresh = np.max([max_miss / np.sqrt(count), eps]) assert np.abs(bias) < bias_thresh @@ -774,7 +763,7 @@ def test_nan2zero_scaling(): # Skip impossible combinations if in_info['min'] == 0 and sign == -1: continue - mx = min(in_info['max'], out_info['max'] * 2., 2**32) + mx = min(in_info['max'], out_info['max'] * 2.0, 2**32) vals = [np.nan] + [100, mx] nan_arr = np.array(vals, dtype=in_dt) * sign # Check that nan scales to same value as zero within same array @@ -814,16 +803,18 @@ def test_finite_range_nan(): ([[], []], (np.inf, -np.inf)), # empty array (np.array([[-3, 0, 1], [2, -1, 4]], dtype=int), (-3, 4)), (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), - ([0., 1, 2, 3], (0, 3)), + ([0.0, 1, 2, 3], (0, 3)), # Complex comparison works as if they are floats ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), ): - for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)), - (SlopeArrayWriter, {}), - (SlopeArrayWriter, dict(calc_scale=False)), - (SlopeInterArrayWriter, {}), - (SlopeInterArrayWriter, dict(calc_scale=False))): + for awt, kwargs in ( + (ArrayWriter, dict(check_scaling=False)), + (SlopeArrayWriter, {}), + (SlopeArrayWriter, dict(calc_scale=False)), + (SlopeInterArrayWriter, {}), + (SlopeInterArrayWriter, dict(calc_scale=False)), + ): for out_type in NUMERIC_TYPES: has_nan = np.any(np.isnan(in_arr)) try: @@ -849,7 +840,7 @@ def test_finite_range_nan(): assert aw.has_nan == has_nan assert aw.finite_range() == res # Structured type cannot be nan and we can test this - a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')]) + a = np.array([[1.0, 0, 1], [2, 3, 4]]).view([('f1', 'f')]) aw = awt(a, a.dtype, **kwargs) with pytest.raises(TypeError): aw.finite_range() diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index 586f277150..d260d2db76 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for BatteryRunner and Report objects +"""Tests for BatteryRunner and Report objects """ from io import StringIO diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index 9f3bfdd93c..ff9e91520e 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -26,51 +26,41 @@ fname=pjoin(data_path, 'example4d+orig.BRIK.gz'), shape=(33, 41, 25, 3), dtype=np.int16, - affine=np.array([[-3.0,0,0,49.5], - [0,-3.0,0,82.312], - [0,0,3.0,-52.3511], - [0,0,0,1.0]]), - zooms=(3., 3., 3., 3.), - data_summary=dict( - min=0, - max=13722, - mean=4266.76024636), + affine=np.array( + [[-3.0, 0, 0, 49.5], [0, -3.0, 0, 82.312], [0, 0, 3.0, -52.3511], [0, 0, 0, 1.0]] + ), + zooms=(3.0, 3.0, 3.0, 3.0), + data_summary=dict(min=0, max=13722, mean=4266.76024636), is_proxy=True, space='ORIG', labels=['#0', '#1', '#2'], - scaling=None), + scaling=None, + ), dict( head=pjoin(data_path, 'scaled+tlrc.HEAD'), fname=pjoin(data_path, 'scaled+tlrc.BRIK'), - shape=(47, 54, 43, 1.), + shape=(47, 54, 43, 1.0), dtype=np.int16, - affine=np.array([[3.0,0,0,-66.], - [0,3.0,0,-87.], - [0,0,3.0,-54.], - [0,0,0,1.0]]), - zooms=(3., 3., 3., 0.), + affine=np.array( + [[3.0, 0, 0, -66.0], [0, 3.0, 0, -87.0], [0, 0, 3.0, -54.0], [0, 0, 0, 1.0]] + ), + zooms=(3.0, 3.0, 3.0, 0.0), data_summary=dict( - min=1.9416814999999998e-07, - max=0.0012724615542099998, - mean=0.00023919645351876782), + min=1.9416814999999998e-07, max=0.0012724615542099998, mean=0.00023919645351876782 + ), is_proxy=True, space='TLRC', labels=['#0'], - scaling=np.array([ 3.88336300e-08]), - ) + scaling=np.array([3.88336300e-08]), + ), ] EXAMPLE_BAD_IMAGES = [ - dict( - head=pjoin(data_path, 'bad_datatype+orig.HEAD'), - err=brikhead.AFNIImageError - ), - dict( - head=pjoin(data_path, 'bad_attribute+orig.HEAD'), - err=brikhead.AFNIHeaderError - ) + dict(head=pjoin(data_path, 'bad_datatype+orig.HEAD'), err=brikhead.AFNIImageError), + dict(head=pjoin(data_path, 'bad_attribute+orig.HEAD'), err=brikhead.AFNIHeaderError), ] + class TestAFNIHeader: module = brikhead test_files = EXAMPLE_IMAGES @@ -139,8 +129,10 @@ def test_brikheadfile(self): class TestBadVars: module = brikhead - vars = ['type = badtype-attribute\nname = BRICK_TYPES\ncount = 1\n1\n', - 'type = integer-attribute\ncount = 1\n1\n'] + vars = [ + 'type = badtype-attribute\nname = BRICK_TYPES\ncount = 1\n1\n', + 'type = integer-attribute\ncount = 1\n1\n', + ] def test_unpack_var(self): for var in self.vars: diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index b8f56454b5..d16541b352 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -1,16 +1,26 @@ -""" Test casting utilities +"""Test casting utilities """ import os from platform import machine import numpy as np -from ..casting import (float_to_int, shared_range, CastingError, int_to_float, - as_int, int_abs, floor_log2, able_int_type, best_float, - ulp, longdouble_precision_improved) +from ..casting import ( + float_to_int, + shared_range, + CastingError, + int_to_float, + as_int, + int_abs, + floor_log2, + able_int_type, + best_float, + ulp, + longdouble_precision_improved, +) from ..testing import suppress_warnings -from numpy.testing import (assert_array_almost_equal, assert_array_equal) +from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest @@ -148,7 +158,7 @@ def test_int_abs(): def test_floor_log2(): assert floor_log2(2**9 + 1) == 9 - 
assert floor_log2(-(2**9) + 1) == 8
     assert floor_log2(2) == 1
     assert floor_log2(1) == 0
     assert floor_log2(0.5) == -1
@@ -161,19 +171,20 @@ def test_floor_log2():
 def test_able_int_type():
     # The integer type capable of containing values
     for vals, exp_out in (
-        ([0, 1], np.uint8),
-        ([0, 255], np.uint8),
-        ([-1, 1], np.int8),
-        ([0, 256], np.uint16),
-        ([-1, 128], np.int16),
-        ([0.1, 1], None),
-        ([0, 2**16], np.uint32),
-        ([-1, 2**15], np.int32),
-        ([0, 2**32], np.uint64),
-        ([-1, 2**31], np.int64),
-        ([-1, 2**64 - 1], None),
-        ([0, 2**64 - 1], np.uint64),
-        ([0, 2**64], None)):
+        ([0, 1], np.uint8),
+        ([0, 255], np.uint8),
+        ([-1, 1], np.int8),
+        ([0, 256], np.uint16),
+        ([-1, 128], np.int16),
+        ([0.1, 1], None),
+        ([0, 2**16], np.uint32),
+        ([-1, 2**15], np.int32),
+        ([0, 2**32], np.uint64),
+        ([-1, 2**31], np.int64),
+        ([-1, 2**64 - 1], None),
+        ([0, 2**64 - 1], np.uint64),
+        ([0, 2**64], None),
+    ):
         assert able_int_type(vals) == exp_out
@@ -200,7 +211,7 @@ def test_able_casting():
 
 def test_best_float():
     # Finds the most capable floating point type
-    """ most capable type will be np.longdouble except when
+    """most capable type will be np.longdouble except when
 
     * np.longdouble has float64 precision (MSVC compiled numpy)
     * machine is sparc64 (float128 very slow)
@@ -213,9 +224,11 @@ def test_best_float():
     assert end_of_ints == end_of_ints + 1
     # longdouble may have more, but not on 32 bit windows, at least
     end_of_ints = np.longdouble(2**53)
-    if (end_of_ints == (end_of_ints + 1) or  # off continuous integers
-            machine() == 'sparc64' or  # crippling slow longdouble on sparc
-            longdouble_precision_improved()):  # Windows precisions can change
+    if (
+        end_of_ints == (end_of_ints + 1)
+        or machine() == 'sparc64'  # off continuous integers
+        or longdouble_precision_improved()  # crippling slow longdouble on sparc
+    ):  # Windows precisions can change
         assert best == np.float64
     else:
         assert best == np.longdouble
@@ -224,6 +237,7 @@ def test_best_float():
 def test_longdouble_precision_improved():
     # Just check that this can only be True on windows, msvc
     from numpy.distutils.ccompiler import get_default_compiler
+
     if not (os.name == 'nt' and get_default_compiler() == 'msvc'):
         assert not longdouble_precision_improved()
 
@@ -248,8 +262,8 @@ def test_ulp():
     assert np.isnan(ulp(-np.inf))
     assert np.isnan(ulp(np.nan))
     # 0 gives subnormal smallest
-    subn64 = np.float64(2**(-1022 - 52))
-    subn32 = np.float32(2**(-126 - 23))
+    subn64 = np.float64(2 ** (-1022 - 52))
+    subn32 = np.float32(2 ** (-126 - 23))
     assert ulp(0.0) == subn64
     assert ulp(np.float64(0)) == subn64
     assert ulp(np.float32(0)) == subn32
diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py
index 0c1671dfbf..0fbadc6af0 100644
--- a/nibabel/tests/test_data.py
+++ b/nibabel/tests/test_data.py
@@ -1,16 +1,23 @@
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
 # vi: set ft=python sts=4 ts=4 sw=4 et:
-""" Tests for data module """
+"""Tests for data module"""
 import os
 from os.path import join as pjoin
 from os import environ as env
 import sys
 import tempfile
 
-from ..data import (get_data_path, find_data_dir,
-                    DataError, _cfg_value, make_datasource,
-                    Datasource, VersionedDatasource, Bomber,
-                    datasource_or_bomber)
+from ..data import (
+    get_data_path,
+    find_data_dir,
+    DataError,
+    _cfg_value,
+    make_datasource,
+    Datasource,
+    VersionedDatasource,
+    Bomber,
+    datasource_or_bomber,
+)
 
 from ..tmpdirs import TemporaryDirectory
 
@@ -182,8 +189,7 @@ def test_find_data_dir():
 
 
 def 
test_make_datasource(with_nimd_env): - pkg_def = dict( - relpath='pkg') + pkg_def = dict(relpath='pkg') with TemporaryDirectory() as tmpdir: nibd.get_data_path = lambda: [tmpdir] with pytest.raises(DataError): diff --git a/nibabel/tests/test_dataobj_images.py b/nibabel/tests/test_dataobj_images.py index e0f042939a..dfbb0fe4cb 100644 --- a/nibabel/tests/test_dataobj_images.py +++ b/nibabel/tests/test_dataobj_images.py @@ -1,4 +1,4 @@ -""" Testing dataobj_images module +"""Testing dataobj_images module """ import numpy as np @@ -43,7 +43,7 @@ def set_data_dtype(self, dtype): class TestDataobjAPI(_TFI, DataInterfaceMixin): - """ Validation for DataobjImage instances - """ + """Validation for DataobjImage instances""" + # A callable returning an image from ``image_maker(data, header)`` image_maker = DoNumpyImage diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index c09fda4988..cd56f507f9 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -1,12 +1,11 @@ -""" Testing `deprecated` module +"""Testing `deprecated` module """ import warnings import pytest from nibabel import pkg_info -from nibabel.deprecated import (ModuleProxy, FutureWarningMixin, - deprecate_with_version) +from nibabel.deprecated import ModuleProxy, FutureWarningMixin, deprecate_with_version from nibabel.tests.test_deprecator import TestDeprecatorFunc as _TestDF @@ -33,7 +32,6 @@ def test_module_proxy(): def test_futurewarning_mixin(): # Test mixin for FutureWarning class C: - def __init__(self, val): self.val = val @@ -44,7 +42,8 @@ class D(FutureWarningMixin, C): pass class E(FutureWarningMixin, C): - warn_message = "Oh no, not this one" + warn_message = 'Oh no, not this one' + with warnings.catch_warnings(record=True) as warns: c = C(42) assert c.meth() == 42 @@ -53,8 +52,7 @@ class E(FutureWarningMixin, C): assert d.meth() == 42 warn = warns.pop(0) assert warn.category == FutureWarning - assert (str(warn.message) == - 'This class will be removed in future versions') + assert str(warn.message) == 'This class will be removed in future versions' e = E(42) assert e.meth() == 42 warn = warns.pop(0) @@ -63,7 +61,7 @@ class E(FutureWarningMixin, C): class TestNibabelDeprecator(_TestDF): - """ Test deprecations against nibabel version """ + """Test deprecations against nibabel version""" dep_func = deprecate_with_version diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 0280692299..31b61f5153 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -1,4 +1,4 @@ -""" Testing deprecator module / Deprecator class +"""Testing deprecator module / Deprecator class """ import sys @@ -8,9 +8,14 @@ import pytest -from nibabel.deprecator import (_ensure_cr, _add_dep_doc, - ExpiredDeprecationError, Deprecator, - TESTSETUP, TESTCLEANUP) +from nibabel.deprecator import ( + _ensure_cr, + _add_dep_doc, + ExpiredDeprecationError, + Deprecator, + TESTSETUP, + TESTCLEANUP, +) from ..testing import clear_and_catch_warnings @@ -35,18 +40,22 @@ def test__add_dep_doc(): assert _add_dep_doc(' bar', 'foo\n') == ' bar\n\nfoo\n' assert _add_dep_doc('bar\n\n', 'foo') == 'bar\n\nfoo\n' assert _add_dep_doc('bar\n \n', 'foo') == 'bar\n\nfoo\n' - assert (_add_dep_doc(' bar\n\nSome explanation', 'foo\nbaz') == - ' bar\n\nfoo\nbaz\n\nSome explanation\n') - assert (_add_dep_doc(' bar\n\n Some explanation', 'foo\nbaz') == - ' bar\n \n foo\n baz\n \n Some explanation\n') + assert ( + _add_dep_doc(' bar\n\nSome explanation', 
'foo\nbaz')
+        == ' bar\n\nfoo\nbaz\n\nSome explanation\n'
+    )
+    assert (
+        _add_dep_doc('  bar\n\n  Some explanation', 'foo\nbaz')
+        == '  bar\n  \n  foo\n  baz\n  \n  Some explanation\n'
+    )
 
 
 class CustomError(Exception):
-    """ Custom error class for testing expired deprecation errors """
+    """Custom error class for testing expired deprecation errors"""
 
 
 def cmp_func(v):
-    """ Comparison func tests against version 2.0 """
+    """Comparison func tests against version 2.0"""
     return (float(v) > 2) - (float(v) < 2)
 
 
@@ -55,15 +64,15 @@ def func_no_doc():
 
 
 def func_doc(i):
-    "A docstring"
+    """A docstring"""
 
 
 def func_doc_long(i, j):
-    "A docstring\n\n   Some text"
+    """A docstring\n\n   Some text"""
 
 
 class TestDeprecatorFunc:
-    """ Test deprecator function specified in ``dep_func`` """
+    """Test deprecator function specified in ``dep_func``"""
 
     dep_func = Deprecator(cmp_func)
 
@@ -83,9 +92,11 @@ def test_dep_func(self):
         with pytest.deprecated_call() as w:
             assert func(1, 2) is None
         assert len(w) == 1
-        assert (func.__doc__ ==
-                f'A docstring\n   \n   foo\n   \n{indent(TESTSETUP, "   ", lambda x: True)}'
-                f'   Some text\n{indent(TESTCLEANUP, "   ", lambda x: True)}')
+        assert (
+            func.__doc__
+            == f'A docstring\n   \n   foo\n   \n{indent(TESTSETUP, "   ", lambda x: True)}'
+            f'   Some text\n{indent(TESTCLEANUP, "   ", lambda x: True)}'
+        )
 
         # Try some since and until versions
         func = dec('foo', '1.1')(func_no_doc)
@@ -97,21 +108,24 @@ def test_dep_func(self):
         with pytest.deprecated_call() as w:
             assert func() is None
         assert len(w) == 1
-        assert (func.__doc__ ==
-                f'foo\n\n* Will raise {ExpiredDeprecationError} as of version: 99.4\n')
+        assert (
+            func.__doc__ == f'foo\n\n* Will raise {ExpiredDeprecationError} as of version: 99.4\n'
+        )
         func = dec('foo', until='1.8')(func_no_doc)
         with pytest.raises(ExpiredDeprecationError):
             func()
-        assert (func.__doc__ ==
-                f'foo\n\n* Raises {ExpiredDeprecationError} as of version: 1.8\n')
+        assert func.__doc__ == f'foo\n\n* Raises {ExpiredDeprecationError} as of version: 1.8\n'
        func = dec('foo', '1.2', '1.8')(func_no_doc)
         with pytest.raises(ExpiredDeprecationError):
             func()
-        assert (func.__doc__ ==
-                'foo\n\n* deprecated from version: 1.2\n* Raises '
-                f'{ExpiredDeprecationError} as of version: 1.8\n')
+        assert (
+            func.__doc__ == 'foo\n\n* deprecated from version: 1.2\n* Raises '
+            f'{ExpiredDeprecationError} as of version: 1.8\n'
+        )
         func = dec('foo', '1.2', '1.8')(func_doc_long)
-        assert func.__doc__ == f"""\
+        assert (
+            func.__doc__
+            == f"""\
 A docstring
 
 foo
 
@@ -119,6 +133,7 @@ def test_dep_func(self):
 * deprecated from version: 1.2
 * Raises {ExpiredDeprecationError} as of version: 1.8
 """
+        )
         with pytest.raises(ExpiredDeprecationError):
             func()
 
@@ -140,7 +155,7 @@ def test_dep_func(self):
 
 
 class TestDeprecatorMaker:
-    """ Test deprecator class creation with custom warnings and errors """
+    """Test deprecator class creation with custom warnings and errors"""
 
     dep_maker = partial(Deprecator, cmp_func)
 
diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py
index 61e031b8d3..b43b2762f7 100644
--- a/nibabel/tests/test_dft.py
+++ b/nibabel/tests/test_dft.py
@@ -1,4 +1,4 @@
-""" Testing dft
+"""Testing dft
 """
 
 import os
@@ -32,15 +32,16 @@ def setUpModule():
 
 class Test_DBclass:
     """Some tests on the database manager class that don't get exercised through the API"""
+
     def setup_method(self):
-        self._db = dft._DB(fname=":memory:", verbose=False)
+        self._db = dft._DB(fname=':memory:', verbose=False)
 
     def test_repr(self):
         assert repr(self._db) == "<DFT ':memory:'>"
 
     def test_cursor_conflict(self):
         rwc = 
self._db.readwrite_cursor - statement = ("INSERT INTO directory (path, mtime) VALUES (?, ?)", ("/tmp", 0)) + statement = ('INSERT INTO directory (path, mtime) VALUES (?, ?)', ('/tmp', 0)) with pytest.raises(sqlite3.IntegrityError): # Whichever exits first will commit and make the second violate uniqueness with rwc() as c1, rwc() as c2: @@ -52,8 +53,8 @@ def test_cursor_conflict(self): def db(monkeypatch): """Build a dft database in memory to avoid cross-process races and not modify the host filesystem.""" - database = dft._DB(fname=":memory:") - monkeypatch.setattr(dft, "DB", database) + database = dft._DB(fname=':memory:') + monkeypatch.setattr(dft, 'DB', database) yield database @@ -69,8 +70,7 @@ def test_study(db): for base_dir in (data_dir, None): studies = dft.get_studies(base_dir) assert len(studies) == 1 - assert (studies[0].uid == - '1.3.12.2.1107.5.2.32.35119.30000010011408520750000000022') + assert studies[0].uid == '1.3.12.2.1107.5.2.32.35119.30000010011408520750000000022' assert studies[0].date == '20100114' assert studies[0].time == '121314.000000' assert studies[0].comments == 'dft study comments' @@ -84,8 +84,7 @@ def test_series(db): studies = dft.get_studies(data_dir) assert len(studies[0].series) == 1 ser = studies[0].series[0] - assert (ser.uid == - '1.3.12.2.1107.5.2.32.35119.2010011420292594820699190.0.0.0') + assert ser.uid == '1.3.12.2.1107.5.2.32.35119.2010011420292594820699190.0.0.0' assert ser.number == '12' assert ser.description == 'CBU_DTI_64D_1A' assert ser.rows == 256 @@ -100,10 +99,8 @@ def test_storage_instances(db): assert len(sis) == 2 assert sis[0].instance_number == 1 assert sis[1].instance_number == 2 - assert (sis[0].uid == - '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.0') - assert (sis[1].uid == - '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.1') + assert sis[0].uid == '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.0' + assert sis[1].uid == '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.1' @unittest.skipUnless(have_pil, 'could not import PIL.Image') diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py index 4897198668..b1f05177bb 100644 --- a/nibabel/tests/test_diff.py +++ b/nibabel/tests/test_diff.py @@ -1,9 +1,9 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Test diff +"""Test diff """ -from os.path import (dirname, join as pjoin, abspath) +from os.path import dirname, join as pjoin, abspath import numpy as np @@ -23,18 +23,18 @@ def test_diff_values_int(): def test_diff_values_float(): - assert not are_values_different(0., 0.) - assert not are_values_different(0., 0., 0.) # can take more + assert not are_values_different(0.0, 0.0) + assert not are_values_different(0.0, 0.0, 0.0) # can take more assert not are_values_different(1.1, 1.1) - assert are_values_different(0., 1.1) - assert are_values_different(0., 0, 1.1) - assert are_values_different(1., 2.) 
+ assert are_values_different(0.0, 1.1) + assert are_values_different(0.0, 0, 1.1) + assert are_values_different(1.0, 2.0) def test_diff_values_mixed(): assert are_values_different(1.0, 1) - assert are_values_different(1.0, "1") - assert are_values_different(1, "1") + assert are_values_different(1.0, '1') + assert are_values_different(1, '1') assert are_values_different(1, None) assert are_values_different(np.ndarray([0]), 'hey') assert not are_values_different(None, None) @@ -42,6 +42,7 @@ def test_diff_values_mixed(): def test_diff_values_array(): from numpy import nan, array, inf + a_int = array([1, 2]) a_float = a_int.astype(float) diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 9e56fd73c7..875e06c0a7 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -13,8 +13,14 @@ import numpy as np from ..openers import Opener -from ..ecat import (EcatHeader, EcatSubHeader, EcatImage, read_mlist, - get_frame_order, get_series_framenumbers) +from ..ecat import ( + EcatHeader, + EcatSubHeader, + EcatImage, + read_mlist, + get_frame_order, + get_series_framenumbers, +) from unittest import TestCase import pytest @@ -62,8 +68,7 @@ def test_header_codes(self): newhdr = hdr.from_fileobj(fid) fid.close() assert newhdr.get_filetype() == 'ECAT7_VOLUME16' - assert (newhdr.get_patient_orient() == - 'ECAT7_Unknown_Orientation') + assert newhdr.get_patient_orient() == 'ECAT7_Unknown_Orientation' def test_update(self): hdr = self.header_class() @@ -98,18 +103,16 @@ def test_mlist(self): assert get_frame_order(mlist)[0][0] == 0 assert get_frame_order(mlist)[0][1] == 16842758.0 # test badly ordered mlist - badordermlist = np.array([[1.68427540e+07, 3.00000000e+00, - 1.20350000e+04, 1.00000000e+00], - [1.68427530e+07, 1.20360000e+04, - 2.40680000e+04, 1.00000000e+00], - [1.68427550e+07, 2.40690000e+04, - 3.61010000e+04, 1.00000000e+00], - [1.68427560e+07, 3.61020000e+04, - 4.81340000e+04, 1.00000000e+00], - [1.68427570e+07, 4.81350000e+04, - 6.01670000e+04, 1.00000000e+00], - [1.68427580e+07, 6.01680000e+04, - 7.22000000e+04, 1.00000000e+00]]) + badordermlist = np.array( + [ + [1.68427540e07, 3.00000000e00, 1.20350000e04, 1.00000000e00], + [1.68427530e07, 1.20360000e04, 2.40680000e04, 1.00000000e00], + [1.68427550e07, 2.40690000e04, 3.61010000e04, 1.00000000e00], + [1.68427560e07, 3.61020000e04, 4.81340000e04, 1.00000000e00], + [1.68427570e07, 4.81350000e04, 6.01670000e04, 1.00000000e00], + [1.68427580e07, 6.01680000e04, 7.22000000e04, 1.00000000e00], + ] + ) with suppress_warnings(): # STORED order assert get_frame_order(badordermlist)[0][0] == 1 @@ -118,18 +121,16 @@ def test_mlist_errors(self): hdr = self.header_class.from_fileobj(fid) hdr['num_frames'] = 6 mlist = read_mlist(fid, hdr.endianness) - mlist = np.array([[1.68427540e+07, 3.00000000e+00, - 1.20350000e+04, 1.00000000e+00], - [1.68427530e+07, 1.20360000e+04, - 2.40680000e+04, 1.00000000e+00], - [1.68427550e+07, 2.40690000e+04, - 3.61010000e+04, 1.00000000e+00], - [1.68427560e+07, 3.61020000e+04, - 4.81340000e+04, 1.00000000e+00], - [1.68427570e+07, 4.81350000e+04, - 6.01670000e+04, 1.00000000e+00], - [1.68427580e+07, 6.01680000e+04, - 7.22000000e+04, 1.00000000e+00]]) + mlist = np.array( + [ + [1.68427540e07, 3.00000000e00, 1.20350000e04, 1.00000000e00], + [1.68427530e07, 1.20360000e04, 2.40680000e04, 1.00000000e00], + [1.68427550e07, 2.40690000e04, 3.61010000e04, 1.00000000e00], + [1.68427560e07, 3.61020000e04, 4.81340000e04, 1.00000000e00], + [1.68427570e07, 4.81350000e04, 6.01670000e04, 
1.00000000e00], + [1.68427580e07, 6.01680000e04, 7.22000000e04, 1.00000000e00], + ] + ) with suppress_warnings(): # STORED order series_framenumbers = get_series_framenumbers(mlist) # first frame stored was actually 2nd frame acquired @@ -162,15 +163,15 @@ def test_subheader_size(self): def test_subheader(self): assert self.subhdr.get_shape() == (10, 10, 3) assert self.subhdr.get_nframes() == 1 - assert (self.subhdr.get_nframes() == - len(self.subhdr.subheaders)) + assert self.subhdr.get_nframes() == len(self.subhdr.subheaders) assert self.subhdr._check_affines() is True - assert_array_almost_equal(np.diag(self.subhdr.get_frame_affine()), - np.array([2.20241979, 2.20241979, 3.125, 1.])) + assert_array_almost_equal( + np.diag(self.subhdr.get_frame_affine()), np.array([2.20241979, 2.20241979, 3.125, 1.0]) + ) assert self.subhdr.get_zooms()[0] == 2.20241978764534 assert self.subhdr.get_zooms()[2] == 3.125 assert self.subhdr._get_data_dtype(0) == np.int16 - #assert_equal(self.subhdr._get_frame_offset(), 1024) + # assert_equal(self.subhdr._get_frame_offset(), 1024) assert self.subhdr._get_frame_offset() == 1536 dat = self.subhdr.raw_data_from_fileobj() assert dat.shape == self.subhdr.get_shape() @@ -185,10 +186,8 @@ class TestEcatImage(TestCase): img = image_class.load(example_file) def test_file(self): - assert (self.img.file_map['header'].filename == - self.example_file) - assert (self.img.file_map['image'].filename == - self.example_file) + assert self.img.file_map['header'].filename == self.example_file + assert self.img.file_map['image'].filename == self.example_file def test_save(self): tmp_file = 'tinypet_tmp.v' @@ -229,11 +228,13 @@ def test_array_proxy_slicing(self): def test_isolation(self): # Test image isolated from external changes to affine img_klass = self.image_class - arr, aff, hdr, sub_hdr, mlist = (self.img.get_fdata(), - self.img.affine, - self.img.header, - self.img.get_subheaders(), - self.img.get_mlist()) + arr, aff, hdr, sub_hdr, mlist = ( + self.img.get_fdata(), + self.img.affine, + self.img.header, + self.img.get_subheaders(), + self.img.get_mlist(), + ) img = img_klass(arr, aff, hdr, sub_hdr, mlist) assert_array_equal(img.affine, aff) aff[0, 0] = 99 @@ -242,11 +243,13 @@ def test_isolation(self): def test_float_affine(self): # Check affines get converted to float img_klass = self.image_class - arr, aff, hdr, sub_hdr, mlist = (self.img.get_fdata(), - self.img.affine, - self.img.header, - self.img.get_subheaders(), - self.img.get_mlist()) + arr, aff, hdr, sub_hdr, mlist = ( + self.img.get_fdata(), + self.img.affine, + self.img.header, + self.img.get_subheaders(), + self.img.get_mlist(), + ) img = img_klass(arr, aff.astype(np.float32), hdr, sub_hdr, mlist) assert img.affine.dtype == np.dtype(np.float64) img = img_klass(arr, aff.astype(np.int16), hdr, sub_hdr, mlist) @@ -255,9 +258,7 @@ def test_float_affine(self): def test_data_regression(self): # Test whether data read has changed since 1.3.0 # These values came from reading the example image using nibabel 1.3.0 - vals = dict(max=248750736458.0, - min=1125342630.0, - mean=117907565661.46666) + vals = dict(max=248750736458.0, min=1125342630.0, mean=117907565661.46666) data = self.img.get_fdata() assert data.max() == vals['max'] assert data.min() == vals['min'] @@ -265,5 +266,4 @@ def test_data_regression(self): def test_mlist_regression(self): # Test mlist is as same as for nibabel 1.3.0 - assert_array_equal(self.img.get_mlist(), - [[16842758, 3, 3011, 1]]) + assert_array_equal(self.img.get_mlist(), [[16842758, 3, 3011, 
1]]) diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index dcd812c52d..de4164cd3c 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test we can correctly import example ECAT files +"""Test we can correctly import example ECAT files """ import os @@ -17,7 +17,7 @@ from .nibabel_data import get_nibabel_data, needs_nibabel_data from ..ecat import load -from numpy.testing import (assert_array_equal, assert_almost_equal) +from numpy.testing import assert_array_equal, assert_almost_equal ECAT_TEST_PATH = pjoin(get_nibabel_data(), 'nipy-ecattest') diff --git a/nibabel/tests/test_endiancodes.py b/nibabel/tests/test_endiancodes.py index a9af11f052..ce460efbb3 100644 --- a/nibabel/tests/test_endiancodes.py +++ b/nibabel/tests/test_endiancodes.py @@ -6,11 +6,11 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for endiancodes module """ +"""Tests for endiancodes module""" import sys -from ..volumeutils import (endian_codes, native_code, swapped_code) +from ..volumeutils import endian_codes, native_code, swapped_code def test_native_swapped(): diff --git a/nibabel/tests/test_environment.py b/nibabel/tests/test_environment.py index 19891a607b..5742edef43 100644 --- a/nibabel/tests/test_environment.py +++ b/nibabel/tests/test_environment.py @@ -1,4 +1,4 @@ -""" Testing environment settings +"""Testing environment settings """ import os @@ -44,7 +44,7 @@ def test_user_dir(with_environment): if USER_KEY in env: del env[USER_KEY] home_dir = nibe.get_home_dir() - if os.name == "posix": + if os.name == 'posix': exp = pjoin(home_dir, '.nipy') else: exp = pjoin(home_dir, '_nipy') diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index d60b0b8b2e..25e4c776d2 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for Euler angles """ +"""Tests for Euler angles""" import math import numpy as np @@ -32,28 +32,19 @@ def x_only(x): cosx = np.cos(x) sinx = np.sin(x) - return np.array( - [[1, 0, 0], - [0, cosx, -sinx], - [0, sinx, cosx]]) + return np.array([[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) def y_only(y): cosy = np.cos(y) siny = np.sin(y) - return np.array( - [[cosy, 0, siny], - [0, 1, 0], - [-siny, 0, cosy]]) + return np.array([[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]]) def z_only(z): cosz = np.cos(z) sinz = np.sin(z) - return np.array( - [[cosz, -sinz, 0], - [sinz, cosz, 0], - [0, 0, 1]]) + return np.array([[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]]) def sympy_euler(z, y, x): @@ -63,10 +54,16 @@ def sympy_euler(z, y, x): # the following copy / pasted from Sympy - see derivations subdirectory return [ [cos(y) * cos(z), -cos(y) * sin(z), sin(y)], - [cos(x) * sin(z) + cos(z) * sin(x) * sin(y), cos(x) * - cos(z) - sin(x) * sin(y) * sin(z), -cos(y) * sin(x)], - [sin(x) * sin(z) - cos(x) * cos(z) * sin(y), cos(z) * - sin(x) + cos(x) * sin(y) * sin(z), cos(x) * cos(y)] + [ + cos(x) * sin(z) + cos(z) * sin(x) * sin(y), + cos(x) * cos(z) - sin(x) * sin(y) * sin(z), + -cos(y) * sin(x), + ], + [ + sin(x) * sin(z) - cos(x) * cos(z) * sin(y), + cos(z) * sin(x) + cos(x) * sin(y) * sin(z), + cos(x) * cos(y), + ], ] @@ -100,15 +97,15 @@ def test_basic_euler(): assert np.all(nea.euler2mat(0, 0, xr) == nea.euler2mat(x=xr)) # Applying an opposite rotation same as inverse (the inverse is # the same as the transpose, but just for clarity) - assert np.allclose(nea.euler2mat(x=-xr), - np.linalg.inv(nea.euler2mat(x=xr))) + assert np.allclose(nea.euler2mat(x=-xr), np.linalg.inv(nea.euler2mat(x=xr))) def test_euler_mat_1(): M = nea.euler2mat() assert_array_equal(M, np.eye(3)) -@pytest.mark.parametrize("x, y, z", eg_rots) + +@pytest.mark.parametrize('x, y, z', eg_rots) def test_euler_mat_2(x, y, z): M1 = nea.euler2mat(z, y, x) M2 = sympy_euler(z, y, x) @@ -128,14 +125,16 @@ def sympy_euler2quat(z=0, y=0, x=0): cos = math.cos sin = math.sin # the following copy / pasted from Sympy output - return (cos(0.5 * x) * cos(0.5 * y) * cos(0.5 * z) - sin(0.5 * x) * sin(0.5 * y) * sin(0.5 * z), - cos(0.5 * x) * sin(0.5 * y) * sin(0.5 * z) + cos(0.5 * y) * cos(0.5 * z) * sin(0.5 * x), - cos(0.5 * x) * cos(0.5 * z) * sin(0.5 * y) - cos(0.5 * y) * sin(0.5 * x) * sin(0.5 * z), - cos(0.5 * x) * cos(0.5 * y) * sin(0.5 * z) + cos(0.5 * z) * sin(0.5 * x) * sin(0.5 * y)) + return ( + cos(0.5 * x) * cos(0.5 * y) * cos(0.5 * z) - sin(0.5 * x) * sin(0.5 * y) * sin(0.5 * z), + cos(0.5 * x) * sin(0.5 * y) * sin(0.5 * z) + cos(0.5 * y) * cos(0.5 * z) * sin(0.5 * x), + cos(0.5 * x) * cos(0.5 * z) * sin(0.5 * y) - cos(0.5 * y) * sin(0.5 * x) * sin(0.5 * z), + cos(0.5 * x) * cos(0.5 * y) * sin(0.5 * z) + cos(0.5 * z) * sin(0.5 * x) * sin(0.5 * y), + ) def crude_mat2euler(M): - """ The simplest possible - ignoring atan2 instability """ + """The simplest possible - ignoring atan2 instability""" r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat return math.atan2(-r12, r11), math.asin(r13), math.atan2(-r23, r33) @@ -159,7 +158,7 @@ def test_euler_instability(): assert not np.allclose(M_e, M_e_back) -@pytest.mark.parametrize("x, y, z", eg_rots) +@pytest.mark.parametrize('x, y, z', eg_rots) def test_quats(x, y, z): M1 = nea.euler2mat(z, y, x) quatM = nq.mat2quat(M1) diff --git a/nibabel/tests/test_filebasedimages.py 
b/nibabel/tests/test_filebasedimages.py index d01440eb65..aee02f5a68 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -1,4 +1,4 @@ -""" Testing filebasedimages module +"""Testing filebasedimages module """ from itertools import product @@ -59,8 +59,8 @@ class SerializableNumpyImage(FBNumpyImage, SerializableImage): class TestFBImageAPI(GenericImageAPI): - """ Validation for FileBasedImage instances - """ + """Validation for FileBasedImage instances""" + # A callable returning an image from ``image_maker(data, header)`` image_maker = FBNumpyImage # A callable returning a header from ``header_maker()`` @@ -80,11 +80,7 @@ def obj_params(self): arr = np.arange(np.prod(shape), dtype=dtype).reshape(shape) hdr = self.header_maker() func = self.make_imaker(arr.copy(), hdr) - params = dict( - dtype=dtype, - data=arr, - shape=shape, - is_proxy=False) + params = dict(dtype=dtype, data=arr, shape=shape, is_proxy=False) yield func, params @@ -93,8 +89,8 @@ class TestSerializableImageAPI(TestFBImageAPI, SerializeMixin): @staticmethod def _header_eq(header_a, header_b): - """ FileBasedHeader is an abstract class, so __eq__ is undefined. - Checking for the same header type is sufficient, here. """ + """FileBasedHeader is an abstract class, so __eq__ is undefined. + Checking for the same header type is sufficient, here.""" return type(header_a) == type(header_b) == FileBasedHeader @@ -102,7 +98,6 @@ def test_filebased_header(): # Test stuff about the default FileBasedHeader class H(FileBasedHeader): - def __init__(self, seq=None): if seq is None: seq = [] diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index ed1e80e70a..73698b23ac 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -21,7 +21,7 @@ from ..nifti1 import Nifti1Image -@unittest.skipIf(SOFT_LIMIT > 4900, "It would take too long to test filehandles") +@unittest.skipIf(SOFT_LIMIT > 4900, 'It would take too long to test filehandles') def test_multiload(): # Make a tiny image, save, load many times. If we are leaking filehandles, # this will cause us to run out and generate an error diff --git a/nibabel/tests/test_fileholders.py b/nibabel/tests/test_fileholders.py index e31a6efcbc..a0e50e4133 100644 --- a/nibabel/tests/test_fileholders.py +++ b/nibabel/tests/test_fileholders.py @@ -1,4 +1,4 @@ -""" Testing fileholders +"""Testing fileholders """ from io import BytesIO diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 49112036d9..b4a816a137 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -6,10 +6,9 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for filename container """ +"""Tests for filename container""" -from ..filename_parser import (types_filenames, TypesFilenamesError, - parse_filename, splitext_addext) +from ..filename_parser import types_filenames, TypesFilenamesError, parse_filename, splitext_addext import pytest @@ -40,9 +39,9 @@ def test_filenames(): types_filenames('test.img.gz', types_exts, ()) # if we don't know about .gz extension, and not enforcing, then we # get something a bit odd - tfns = types_filenames('test.img.gz', types_exts, - trailing_suffixes=(), - enforce_extensions=False) + tfns = types_filenames( + 'test.img.gz', types_exts, trailing_suffixes=(), enforce_extensions=False + ) assert tfns == {'header': 'test.img.hdr', 'image': 'test.img.gz'} # the suffixes we remove and replaces can be any suffixes. tfns = types_filenames('test.img.bzr', types_exts, ('.bzr',)) @@ -50,9 +49,9 @@ def test_filenames(): # If we specifically pass the remove / replace suffixes, then we # don't remove / replace the .gz and .bz2, unless they are passed # specifically. - tfns = types_filenames('test.img.bzr', types_exts, - trailing_suffixes=('.bzr',), - enforce_extensions=False) + tfns = types_filenames( + 'test.img.bzr', types_exts, trailing_suffixes=('.bzr',), enforce_extensions=False + ) assert tfns == {'header': 'test.hdr.bzr', 'image': 'test.img.bzr'} # but, just .gz or .bz2 as extension gives an error, if enforcing is on with pytest.raises(TypesFilenamesError): @@ -61,8 +60,7 @@ def test_filenames(): types_filenames('test.bz2', types_exts) # if enforcing is off, it tries to work out what the other files # should be assuming the passed filename is of the first input type - tfns = types_filenames('test.gz', types_exts, - enforce_extensions=False) + tfns = types_filenames('test.gz', types_exts, enforce_extensions=False) assert tfns == {'image': 'test.gz', 'header': 'test.hdr.gz'} # case (in)sensitivity, and effect of uppercase, lowercase tfns = types_filenames('test.IMG', types_exts) @@ -76,41 +74,29 @@ def test_filenames(): def test_parse_filename(): types_exts = (('t1', 'ext1'), ('t2', 'ext2')) exp_in_outs = ( - (('/path/fname.funny', ()), - ('/path/fname', '.funny', None, None)), - (('/path/fnameext2', ()), - ('/path/fname', 'ext2', None, 't2')), - (('/path/fnameext2', ('.gz',)), - ('/path/fname', 'ext2', None, 't2')), - (('/path/fnameext2.gz', ('.gz',)), - ('/path/fname', 'ext2', '.gz', 't2')) + (('/path/fname.funny', ()), ('/path/fname', '.funny', None, None)), + (('/path/fnameext2', ()), ('/path/fname', 'ext2', None, 't2')), + (('/path/fnameext2', ('.gz',)), ('/path/fname', 'ext2', None, 't2')), + (('/path/fnameext2.gz', ('.gz',)), ('/path/fname', 'ext2', '.gz', 't2')), ) for inps, exps in exp_in_outs: pth, sufs = inps res = parse_filename(pth, types_exts, sufs) assert res == exps upth = pth.upper() - uexps = (exps[0].upper(), exps[1].upper(), - exps[2].upper() if exps[2] else None, - exps[3]) + uexps = (exps[0].upper(), exps[1].upper(), exps[2].upper() if exps[2] else None, exps[3]) res = parse_filename(upth, types_exts, sufs) assert res == uexps # test case sensitivity - res = parse_filename('/path/fnameext2.GZ', - types_exts, - ('.gz',), False) # case insensitive again + res = parse_filename( + '/path/fnameext2.GZ', types_exts, ('.gz',), False + ) # case insensitive again assert res == ('/path/fname', 'ext2', '.GZ', 't2') - res = parse_filename('/path/fnameext2.GZ', - types_exts, - ('.gz',), True) # case sensitive + res = 
parse_filename('/path/fnameext2.GZ', types_exts, ('.gz',), True) # case sensitive assert res == ('/path/fnameext2', '.GZ', None, None) - res = parse_filename('/path/fnameEXT2.gz', - types_exts, - ('.gz',), False) # case insensitive + res = parse_filename('/path/fnameEXT2.gz', types_exts, ('.gz',), False) # case insensitive assert res == ('/path/fname', 'EXT2', '.gz', 't2') - res = parse_filename('/path/fnameEXT2.gz', - types_exts, - ('.gz',), True) # case sensitive + res = parse_filename('/path/fnameEXT2.gz', types_exts, ('.gz',), True) # case sensitive assert res == ('/path/fnameEXT2', '', '.gz', None) diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index d3c895618e..80c4a0ab92 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing filesets - a draft - +"""Testing filesets - a draft """ import numpy as np @@ -20,12 +19,14 @@ from numpy.testing import assert_array_equal import pytest + def test_files_spatialimages(): # test files creation in image classes arr = np.zeros((2, 3, 4)) aff = np.eye(4) - klasses = [klass for klass in all_image_classes - if klass.rw and issubclass(klass, SpatialImage)] + klasses = [ + klass for klass in all_image_classes if klass.rw and issubclass(klass, SpatialImage) + ] for klass in klasses: file_map = klass.make_file_map() for key, value in file_map.items(): @@ -88,8 +89,11 @@ def test_round_trip_spatialimages(): # write an image to files data = np.arange(24, dtype='i4').reshape((2, 3, 4)) aff = np.eye(4) - klasses = [klass for klass in all_image_classes - if klass.rw and klass.makeable and issubclass(klass, SpatialImage)] + klasses = [ + klass + for klass in all_image_classes + if klass.rw and klass.makeable and issubclass(klass, SpatialImage) + ] for klass in klasses: file_map = klass.make_file_map() for key in file_map: diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index 35c61e149b..e98fd473a0 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -1,4 +1,4 @@ -""" Test slicing of file-like objects """ +"""Test slicing of file-like objects""" from io import BytesIO @@ -9,12 +9,24 @@ import numpy as np -from ..fileslice import (is_fancy, canonical_slicers, fileslice, - predict_shape, read_segments, _positive_slice, - threshold_heuristic, optimize_slicer, slice2len, - fill_slicer, optimize_read_slicers, slicers2segments, - calc_slicedefs, _simple_fileslice, slice2outax, - strided_scalar) +from ..fileslice import ( + is_fancy, + canonical_slicers, + fileslice, + predict_shape, + read_segments, + _positive_slice, + threshold_heuristic, + optimize_slicer, + slice2len, + fill_slicer, + optimize_read_slicers, + slicers2segments, + calc_slicedefs, + _simple_fileslice, + slice2outax, + strided_scalar, +) import pytest from numpy.testing import assert_array_equal @@ -52,13 +64,7 @@ def test_is_fancy(): def test_canonical_slicers(): # Check transformation of sliceobj into canonical form - slicers = (slice(None), - slice(9), - slice(0, 9), - slice(1, 10), - slice(1, 10, 2), - 2, - np.array(2)) + slicers = (slice(None), slice(9), slice(0, 9), slice(1, 10), slice(1, 10, 2), 2, np.array(2)) shape = (10, 10) for slice0 in slicers: @@ -71,8 +77,9 @@ def test_canonical_slicers(): # Check None passes through assert canonical_slicers(sliceobj + (None,), shape) == sliceobj + (None,) assert 
canonical_slicers((None,) + sliceobj, shape) == (None,) + sliceobj - assert (canonical_slicers((None,) + sliceobj + (None,), shape) == - (None,) + sliceobj + (None,)) + assert canonical_slicers((None,) + sliceobj + (None,), shape) == (None,) + sliceobj + ( + None, + ) # Check Ellipsis assert canonical_slicers((Ellipsis,), shape) == (slice(None), slice(None)) assert canonical_slicers((Ellipsis, None), shape) == (slice(None), slice(None), None) @@ -80,8 +87,13 @@ def test_canonical_slicers(): assert canonical_slicers((1, Ellipsis), shape) == (1, slice(None)) # Ellipsis at end does nothing assert canonical_slicers((1, 1, Ellipsis), shape) == (1, 1) - assert (canonical_slicers((1, Ellipsis, 2), (10, 1, 2, 3, 11)) == - (1, slice(None), slice(None), slice(None), 2)) + assert canonical_slicers((1, Ellipsis, 2), (10, 1, 2, 3, 11)) == ( + 1, + slice(None), + slice(None), + slice(None), + 2, + ) with pytest.raises(ValueError): canonical_slicers((Ellipsis, 1, Ellipsis), (2, 3, 4, 5)) # Check full slices get expanded @@ -109,7 +121,14 @@ def test_canonical_slicers(): canonical_slicers((1, 10), shape, True) # Unless check_inds is False assert canonical_slicers((10,), shape, False) == (10, slice(None)) - assert canonical_slicers((1, 10,), shape, False) == (1, 10) + assert canonical_slicers( + ( + 1, + 10, + ), + shape, + False, + ) == (1, 10) # Check negative -> positive assert canonical_slicers(-1, shape) == (9, slice(None)) assert canonical_slicers((slice(None), -1), shape) == (slice(None), 9) @@ -150,20 +169,15 @@ def _slices_for_len(L): # Example slices for a dimension of length L if L == 0: raise ValueError('Need length > 0') - sdefs = [ - 0, - L // 2, - L - 1, - -1, - slice(None), - slice(L - 1)] + sdefs = [0, L // 2, L - 1, -1, slice(None), slice(L - 1)] if L > 1: sdefs += [ -2, slice(1, L - 1), slice(1, L - 1, 2), slice(L - 1, 1, -1), - slice(L - 1, 1, -2)] + slice(L - 1, 1, -2), + ] return tuple(sdefs) @@ -276,24 +290,26 @@ def test_optimize_slicer(): for is_slowest in (True, False): # following tests not affected by all_full or optimization # full - always passes through - assert ( - optimize_slicer(slice(None), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None))) + assert optimize_slicer(slice(None), 10, all_full, is_slowest, 4, heuristic) == ( + slice(None), + slice(None), + ) # Even if full specified with explicit values - assert ( - optimize_slicer(slice(10), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None))) - assert ( - optimize_slicer(slice(0, 10), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None))) - assert ( - optimize_slicer(slice(0, 10, 1), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None))) + assert optimize_slicer(slice(10), 10, all_full, is_slowest, 4, heuristic) == ( + slice(None), + slice(None), + ) + assert optimize_slicer(slice(0, 10), 10, all_full, is_slowest, 4, heuristic) == ( + slice(None), + slice(None), + ) + assert optimize_slicer( + slice(0, 10, 1), 10, all_full, is_slowest, 4, heuristic + ) == (slice(None), slice(None)) # Reversed full is still full, but with reversed post_slice - assert ( - optimize_slicer( - slice(None, None, -1), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None, None, -1))) + assert optimize_slicer( + slice(None, None, -1), 10, all_full, is_slowest, 4, heuristic + ) == (slice(None), slice(None, None, -1)) # Contiguous is contiguous unless heuristic kicks in, in which case it may # be 'full' assert optimize_slicer(slice(9), 10, False, False, 4, 
_always) == (slice(0, 9, 1), slice(None)) @@ -303,48 +319,78 @@ def test_optimize_slicer(): assert optimize_slicer(slice(9), 10, True, True, 4, _always) == (slice(0, 9, 1), slice(None)) # Nor if the heuristic won't update assert optimize_slicer(slice(9), 10, True, False, 4, _never) == (slice(0, 9, 1), slice(None)) - assert (optimize_slicer(slice(1, 10), 10, True, False, 4, _never) == - (slice(1, 10, 1), slice(None))) + assert optimize_slicer(slice(1, 10), 10, True, False, 4, _never) == ( + slice(1, 10, 1), + slice(None), + ) # Reversed contiguous still contiguous - assert (optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == - (slice(0, 9, 1), slice(None, None, -1))) - assert (optimize_slicer(slice(8, None, -1), 10, True, False, 4, _always) == - (slice(None), slice(8, None, -1))) - assert (optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == - (slice(0, 9, 1), slice(None, None, -1))) - assert (optimize_slicer(slice(9, 0, -1), 10, False, False, 4, _never) == - (slice(1, 10, 1), slice(None, None, -1))) + assert optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == ( + slice(0, 9, 1), + slice(None, None, -1), + ) + assert optimize_slicer(slice(8, None, -1), 10, True, False, 4, _always) == ( + slice(None), + slice(8, None, -1), + ) + assert optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == ( + slice(0, 9, 1), + slice(None, None, -1), + ) + assert optimize_slicer(slice(9, 0, -1), 10, False, False, 4, _never) == ( + slice(1, 10, 1), + slice(None, None, -1), + ) # Non-contiguous - assert (optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _never) == - (slice(0, 10, 2), slice(None))) + assert optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _never) == ( + slice(0, 10, 2), + slice(None), + ) # all_full triggers optimization, but optimization does nothing - assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _never) == - (slice(0, 10, 2), slice(None))) + assert optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _never) == ( + slice(0, 10, 2), + slice(None), + ) # all_full triggers optimization, optimization does something - assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == - (slice(None), slice(0, 10, 2))) + assert optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == ( + slice(None), + slice(0, 10, 2), + ) # all_full disables optimization, optimization does something - assert (optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _always) == - (slice(0, 10, 2), slice(None))) + assert optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _always) == ( + slice(0, 10, 2), + slice(None), + ) # Non contiguous, reversed - assert (optimize_slicer(slice(10, None, -2), 10, False, False, 4, _never) == - (slice(1, 10, 2), slice(None, None, -1))) - assert (optimize_slicer(slice(10, None, -2), 10, True, False, 4, _always) == - (slice(None), slice(9, None, -2))) + assert optimize_slicer(slice(10, None, -2), 10, False, False, 4, _never) == ( + slice(1, 10, 2), + slice(None, None, -1), + ) + assert optimize_slicer(slice(10, None, -2), 10, True, False, 4, _always) == ( + slice(None), + slice(9, None, -2), + ) # Short non-contiguous - assert (optimize_slicer(slice(2, 8, 2), 10, False, False, 4, _never) == - (slice(2, 8, 2), slice(None))) + assert optimize_slicer(slice(2, 8, 2), 10, False, False, 4, _never) == ( + slice(2, 8, 2), + slice(None), + ) # with partial read - assert (optimize_slicer(slice(2, 8, 2), 10, True, False, 4, _partial) == - (slice(2, 8, 1), slice(None, None, 2))) + assert 
optimize_slicer(slice(2, 8, 2), 10, True, False, 4, _partial) == ( + slice(2, 8, 1), + slice(None, None, 2), + ) # If this is the slowest changing dimension, heuristic can upgrade None to # contiguous, but not (None, contiguous) to full # we've done this one already - assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == - (slice(None), slice(0, 10, 2))) + assert optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == ( + slice(None), + slice(0, 10, 2), + ) # if slowest, just upgrade to contiguous - assert (optimize_slicer(slice(0, 10, 2), 10, True, True, 4, _always) == - (slice(0, 10, 1), slice(None, None, 2))) + assert optimize_slicer(slice(0, 10, 2), 10, True, True, 4, _always) == ( + slice(0, 10, 1), + slice(None, None, 2), + ) # contiguous does not upgrade to full assert optimize_slicer(slice(9), 10, True, True, 4, _always) == (slice(0, 9, 1), slice(None)) # integer @@ -365,70 +411,109 @@ def test_optimize_slicer(): def test_optimize_read_slicers(): # Test function to optimize read slicers assert optimize_read_slicers((1,), (10,), 4, _never) == ((1,), ()) - assert (optimize_read_slicers((slice(None),), (10,), 4, _never) == - ((slice(None),), (slice(None),))) - assert (optimize_read_slicers((slice(9),), (10,), 4, _never) == - ((slice(0, 9, 1),), (slice(None),))) + assert optimize_read_slicers((slice(None),), (10,), 4, _never) == ( + (slice(None),), + (slice(None),), + ) + assert optimize_read_slicers((slice(9),), (10,), 4, _never) == ( + (slice(0, 9, 1),), + (slice(None),), + ) # optimize cannot update a continuous to a full if last - assert (optimize_read_slicers((slice(9),), (10,), 4, _always) == - ((slice(0, 9, 1),), (slice(None),))) + assert optimize_read_slicers((slice(9),), (10,), 4, _always) == ( + (slice(0, 9, 1),), + (slice(None),), + ) # optimize can update non-contiguous to continuous even if last # not optimizing - assert (optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _never) == - ((slice(0, 9, 2),), (slice(None),))) + assert optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _never) == ( + (slice(0, 9, 2),), + (slice(None),), + ) # optimizing - assert (optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _always) == - ((slice(0, 9, 1),), (slice(None, None, 2),))) + assert optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _always) == ( + (slice(0, 9, 1),), + (slice(None, None, 2),), + ) # Optimize does nothing for integer when last assert optimize_read_slicers((1,), (10,), 4, _always) == ((1,), ()) # 2D - assert (optimize_read_slicers((slice(None), slice(None)), (10, 6), 4, _never) == - ((slice(None), slice(None)), (slice(None), slice(None)))) - assert (optimize_read_slicers((slice(None), 1), (10, 6), 4, _never) == - ((slice(None), 1), (slice(None),))) - assert (optimize_read_slicers((1, slice(None)), (10, 6), 4, _never) == - ((1, slice(None)), (slice(None),))) + assert optimize_read_slicers((slice(None), slice(None)), (10, 6), 4, _never) == ( + (slice(None), slice(None)), + (slice(None), slice(None)), + ) + assert optimize_read_slicers((slice(None), 1), (10, 6), 4, _never) == ( + (slice(None), 1), + (slice(None),), + ) + assert optimize_read_slicers((1, slice(None)), (10, 6), 4, _never) == ( + (1, slice(None)), + (slice(None),), + ) # Not optimizing a partial slice - assert (optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _never) == - ((slice(0, 9, 1), slice(None)), (slice(None), slice(None)))) + assert optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _never) == ( + (slice(0, 9, 1), slice(None)), + (slice(None), 
slice(None)), + ) # Optimizing a partial slice - assert (optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _always) == - ((slice(None), slice(None)), (slice(0, 9, 1), slice(None)))) + assert optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _always) == ( + (slice(None), slice(None)), + (slice(0, 9, 1), slice(None)), + ) # Optimize cannot update a continuous to a full if last - assert (optimize_read_slicers((slice(None), slice(5)), (10, 6), 4, _always) == - ((slice(None), slice(0, 5, 1)), (slice(None), slice(None)))) + assert optimize_read_slicers((slice(None), slice(5)), (10, 6), 4, _always) == ( + (slice(None), slice(0, 5, 1)), + (slice(None), slice(None)), + ) # optimize can update non-contiguous to full if not last # not optimizing - assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _never) == - ((slice(0, 9, 3), slice(None)), (slice(None), slice(None)))) + assert optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _never) == ( + (slice(0, 9, 3), slice(None)), + (slice(None), slice(None)), + ) # optimizing full - assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _always) == - ((slice(None), slice(None)), (slice(0, 9, 3), slice(None)))) + assert optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _always) == ( + (slice(None), slice(None)), + (slice(0, 9, 3), slice(None)), + ) # optimizing partial - assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _partial) == - ((slice(0, 9, 1), slice(None)), (slice(None, None, 3), slice(None)))) + assert optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _partial) == ( + (slice(0, 9, 1), slice(None)), + (slice(None, None, 3), slice(None)), + ) # optimize can update non-contiguous to continuous even if last # not optimizing - assert (optimize_read_slicers((slice(None), slice(0, 5, 2)), (10, 6), 4, _never) == - ((slice(None), slice(0, 5, 2)), (slice(None), slice(None)))) + assert optimize_read_slicers((slice(None), slice(0, 5, 2)), (10, 6), 4, _never) == ( + (slice(None), slice(0, 5, 2)), + (slice(None), slice(None)), + ) # optimizing - assert (optimize_read_slicers((slice(None), slice(0, 5, 2),), (10, 6), 4, _always) == - ((slice(None), slice(0, 5, 1)), (slice(None), slice(None, None, 2)))) + assert optimize_read_slicers( + ( + slice(None), + slice(0, 5, 2), + ), + (10, 6), + 4, + _always, + ) == ((slice(None), slice(0, 5, 1)), (slice(None), slice(None, None, 2))) # Optimize does nothing for integer when last - assert (optimize_read_slicers((slice(None), 1), (10, 6), 4, _always) == - ((slice(None), 1), (slice(None),))) + assert optimize_read_slicers((slice(None), 1), (10, 6), 4, _always) == ( + (slice(None), 1), + (slice(None),), + ) # Check gap threshold with 3D _depends0 = partial(threshold_heuristic, skip_thresh=10 * 4 - 1) _depends1 = partial(threshold_heuristic, skip_thresh=10 * 4) - assert (optimize_read_slicers( - (slice(9), slice(None), slice(None)), (10, 6, 2), 4, _depends0) == - ((slice(None), slice(None), slice(None)), (slice(0, 9, 1), slice(None), slice(None)))) - assert (optimize_read_slicers( - (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends0) == - ((slice(None), slice(0, 5, 1), slice(None)), (slice(None), slice(None), slice(None)))) - assert (optimize_read_slicers( - (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends1) == - ((slice(None), slice(None), slice(None)), (slice(None), slice(0, 5, 1), slice(None)))) + assert optimize_read_slicers( + (slice(9), slice(None), slice(None)), (10, 6, 
2), 4, _depends0 + ) == ((slice(None), slice(None), slice(None)), (slice(0, 9, 1), slice(None), slice(None))) + assert optimize_read_slicers( + (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends0 + ) == ((slice(None), slice(0, 5, 1), slice(None)), (slice(None), slice(None), slice(None))) + assert optimize_read_slicers( + (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends1 + ) == ((slice(None), slice(None), slice(None)), (slice(None), slice(0, 5, 1), slice(None))) # Check longs as integer slices sn = slice(None) assert optimize_read_slicers((1, 2, 3), (2, 3, 4), 4, _always) == ((sn, sn, 3), (1, 2)) @@ -440,94 +525,85 @@ def test_slicers2segments(): assert slicers2segments((0, 1), (10, 6), 7, 4) == [[7 + 10 * 4, 4]] assert slicers2segments((0, 1, 2), (10, 6, 4), 7, 4) == [[7 + 10 * 4 + 10 * 6 * 2 * 4, 4]] assert slicers2segments((slice(None),), (10,), 7, 4) == [[7, 10 * 4]] - assert (slicers2segments((0, slice(None)), (10, 6), 7, 4) == - [[7 + 10 * 4 * i, 4] for i in range(6)]) + assert slicers2segments((0, slice(None)), (10, 6), 7, 4) == [ + [7 + 10 * 4 * i, 4] for i in range(6) + ] assert slicers2segments((slice(None), 0), (10, 6), 7, 4) == [[7, 10 * 4]] assert slicers2segments((slice(None), slice(None)), (10, 6), 7, 4) == [[7, 10 * 6 * 4]] - assert (slicers2segments((slice(None), slice(None), 2), (10, 6, 4), 7, 4) == - [[7 + 10 * 6 * 2 * 4, 10 * 6 * 4]]) + assert slicers2segments((slice(None), slice(None), 2), (10, 6, 4), 7, 4) == [ + [7 + 10 * 6 * 2 * 4, 10 * 6 * 4] + ] def test_calc_slicedefs(): # Check get_segments routine. The tests aren't well organized because I # wrote them after the code. We live and (fail to) learn - segments, out_shape, new_slicing = calc_slicedefs( - (1,), (10,), 4, 7, 'F', _never) + segments, out_shape, new_slicing = calc_slicedefs((1,), (10,), 4, 7, 'F', _never) assert segments == [[11, 4]] assert new_slicing == () assert out_shape == () - assert ( - calc_slicedefs((slice(None),), (10,), 4, 7, 'F', _never) == - ([[7, 40]], - (10,), - (), - )) - assert ( - calc_slicedefs((slice(9),), (10,), 4, 7, 'F', _never) == - ([[7, 36]], - (9,), - (), - )) - assert ( - calc_slicedefs((slice(1, 9),), (10,), 4, 7, 'F', _never) == - ([[11, 32]], - (8,), - (), - )) + assert calc_slicedefs((slice(None),), (10,), 4, 7, 'F', _never) == ( + [[7, 40]], + (10,), + (), + ) + assert calc_slicedefs((slice(9),), (10,), 4, 7, 'F', _never) == ( + [[7, 36]], + (9,), + (), + ) + assert calc_slicedefs((slice(1, 9),), (10,), 4, 7, 'F', _never) == ( + [[11, 32]], + (8,), + (), + ) # Two dimensions, single slice - assert ( - calc_slicedefs((0,), (10, 6), 4, 7, 'F', _never) == - ([[7, 4], [47, 4], [87, 4], [127, 4], [167, 4], [207, 4]], - (6,), - (), - )) - assert ( - calc_slicedefs((0,), (10, 6), 4, 7, 'C', _never) == - ([[7, 6 * 4]], - (6,), - (), - )) + assert calc_slicedefs((0,), (10, 6), 4, 7, 'F', _never) == ( + [[7, 4], [47, 4], [87, 4], [127, 4], [167, 4], [207, 4]], + (6,), + (), + ) + assert calc_slicedefs((0,), (10, 6), 4, 7, 'C', _never) == ( + [[7, 6 * 4]], + (6,), + (), + ) # Two dimensions, contiguous not full - assert ( - calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'F', _never) == - ([[51, 4], [91, 4], [131, 4], [171, 4]], - (4,), - (), - )) - assert ( - calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'C', _never) == - ([[7 + 7 * 4, 16]], - (4,), - (), - )) + assert calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'F', _never) == ( + [[51, 4], [91, 4], [131, 4], [171, 4]], + (4,), + (), + ) + assert calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'C', 
_never) == (
+        [[7 + 7 * 4, 16]],
+        (4,),
+        (),
+    )
     # With full slice first
-    assert (
-        calc_slicedefs((slice(None), slice(1, 5)), (10, 6), 4, 7, 'F', _never) ==
-        ([[47, 160]],
-         (10, 4),
-         (),
-         ))
+    assert calc_slicedefs((slice(None), slice(1, 5)), (10, 6), 4, 7, 'F', _never) == (
+        [[47, 160]],
+        (10, 4),
+        (),
+    )
     # Check effect of heuristic on calc_slicedefs
     # Even integer slices can generate full when heuristic says so
-    assert (
-        calc_slicedefs((1, slice(None)), (10, 6), 4, 7, 'F', _always) ==
-        ([[7, 10 * 6 * 4]],
-         (10, 6),
-         (1, slice(None)),
-         ))
+    assert calc_slicedefs((1, slice(None)), (10, 6), 4, 7, 'F', _always) == (
+        [[7, 10 * 6 * 4]],
+        (10, 6),
+        (1, slice(None)),
+    )
     # Except when last
-    assert (
-        calc_slicedefs((slice(None), 1), (10, 6), 4, 7, 'F', _always) ==
-        ([[7 + 10 * 4, 10 * 4]],
-         (10,),
-         (),
-         ))
+    assert calc_slicedefs((slice(None), 1), (10, 6), 4, 7, 'F', _always) == (
+        [[7 + 10 * 4, 10 * 4]],
+        (10,),
+        (),
+    )


 def test_predict_shape():
     shapes = (15, 16, 17, 18)
     for n_dim in range(len(shapes)):
-        shape = shapes[:n_dim + 1]
+        shape = shapes[: n_dim + 1]
         arr = np.arange(np.prod(shape)).reshape(shape)
         slicers_list = []
         for i in range(n_dim):
@@ -548,8 +624,16 @@ def test_predict_shape():
 def test_strided_scalar():
     # Utility to make numpy array of given shape from scalar using striding
     for shape, scalar in product(
-            ((2,), (2, 3,), (2, 3, 4)),
-            (1, 2, np.int16(3))):
+        (
+            (2,),
+            (
+                2,
+                3,
+            ),
+            (2, 3, 4),
+        ),
+        (1, 2, np.int16(3)),
+    ):
         expected = np.zeros(shape, dtype=np.array(scalar).dtype) + scalar
         observed = strided_scalar(shape, scalar)
         assert_array_equal(observed, expected)
@@ -563,6 +647,7 @@ def test_strided_scalar():

     def setval(x):
         x[..., 0] = 99
+
     # RuntimeError for numpy < 1.10
     with pytest.raises((RuntimeError, ValueError)):
         setval(observed)
@@ -582,10 +667,8 @@ def test_read_segments():
     fobj.write(arr.tobytes())
     _check_bytes(read_segments(fobj, [(0, 200)], 200), arr)
     _check_bytes(read_segments(fobj, [(0, 100), (100, 100)], 200), arr)
-    _check_bytes(read_segments(fobj, [(0, 50), (100, 50)], 100),
-                 np.r_[arr[:25], arr[50:75]])
-    _check_bytes(read_segments(fobj, [(10, 40), (100, 50)], 90),
-                 np.r_[arr[5:25], arr[50:75]])
+    _check_bytes(read_segments(fobj, [(0, 50), (100, 50)], 100), np.r_[arr[:25], arr[50:75]])
+    _check_bytes(read_segments(fobj, [(10, 40), (100, 50)], 90), np.r_[arr[5:25], arr[50:75]])
     _check_bytes(read_segments(fobj, [], 0), arr[0:0])
     # Error conditions
     with pytest.raises(ValueError):
@@ -626,7 +709,7 @@ def random_segments(nsegs):

     # Get the data that should be returned for the given segments
     def get_expected(segs):
-        segs = [arr[off:off + length] for off, length in segs]
+        segs = [arr[off : off + length] for off, length in segs]
         return np.concatenate(segs)

     # Read from the file, check the result. We do this task simultaneously in
@@ -658,8 +741,7 @@ def _check_slicer(sliceobj, arr, fobj, offset, order, heuristic=threshold_heuris

 def slicer_samples(shape):
-    """ Generator returns slice samples for given `shape`
-    """
+    """Generator returns slice samples for given `shape`"""
     ndim = len(shape)
     slicers_list = []
     for i in range(ndim):
@@ -742,5 +824,6 @@ def test_fileslice_heuristic():
             _check_slicer(sliceobj, arr, fobj, 0, order, heuristic)
     # Check _simple_fileslice while we're at it - why not?
new_slice = _simple_fileslice( - fobj, sliceobj, arr.shape, arr.dtype, 0, order, heuristic) + fobj, sliceobj, arr.shape, arr.dtype, 0, order, heuristic + ) assert_array_equal(arr[sliceobj], new_slice) diff --git a/nibabel/tests/test_fileutils.py b/nibabel/tests/test_fileutils.py index ffd7d91b6a..3544b88977 100644 --- a/nibabel/tests/test_fileutils.py +++ b/nibabel/tests/test_fileutils.py @@ -6,7 +6,7 @@ # copyright and license terms. # # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing fileutils module +"""Testing fileutils module """ diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index a08a24d102..62df671aca 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -1,14 +1,25 @@ -""" Test floating point deconstructions and floor methods +"""Test floating point deconstructions and floor methods """ import sys import numpy as np -from ..casting import (floor_exact, ceil_exact, as_int, FloatingError, - int_to_float, floor_log2, type_info, _check_nmant, - _check_maxexp, ok_floats, on_powerpc, have_binary128, - longdouble_precision_improved) +from ..casting import ( + floor_exact, + ceil_exact, + as_int, + FloatingError, + int_to_float, + floor_log2, + type_info, + _check_nmant, + _check_maxexp, + ok_floats, + on_powerpc, + have_binary128, + longdouble_precision_improved, +) from ..testing import suppress_warnings import pytest @@ -19,13 +30,17 @@ def dtt2dict(dtt): - """ Create info dictionary from numpy type - """ + """Create info dictionary from numpy type""" info = np.finfo(dtt) - return dict(min=info.min, max=info.max, - nexp=info.nexp, nmant=info.nmant, - minexp=info.minexp, maxexp=info.maxexp, - width=np.dtype(dtt).itemsize) + return dict( + min=info.min, + max=info.max, + nexp=info.nexp, + nmant=info.nmant, + minexp=info.minexp, + maxexp=info.maxexp, + width=np.dtype(dtt).itemsize, + ) def test_type_info(): @@ -33,10 +48,18 @@ def test_type_info(): for dtt in np.sctypes['int'] + np.sctypes['uint']: info = np.iinfo(dtt) infod = type_info(dtt) - assert dict(min=info.min, max=info.max, - nexp=None, nmant=None, - minexp=None, maxexp=None, - width=np.dtype(dtt).itemsize) == infod + assert ( + dict( + min=info.min, + max=info.max, + nexp=None, + nmant=None, + minexp=None, + maxexp=None, + width=np.dtype(dtt).itemsize, + ) + == infod + ) assert infod['min'].dtype.type == dtt assert infod['max'].dtype.type == dtt for dtt in IEEE_floats + [np.complex64, np.complex64]: @@ -51,10 +74,13 @@ def test_type_info(): vals = tuple(ld_dict[k] for k in ('nmant', 'nexp', 'width')) # Information for PPC head / tail doubles from: # https://developer.apple.com/library/mac/#documentation/Darwin/Reference/Manpages/man3/float.3.html - if vals in ((52, 11, 8), # longdouble is same as double - (63, 15, 12), (63, 15, 16), # intel 80 bit - (112, 15, 16), # real float128 - (106, 11, 16)): # PPC head, tail doubles, expected values + if vals in ( + (52, 11, 8), # longdouble is same as double + (63, 15, 12), + (63, 15, 16), # intel 80 bit + (112, 15, 16), # real float128 + (106, 11, 16), + ): # PPC head, tail doubles, expected values pass elif vals == (105, 11, 16): # bust info for PPC head / tail longdoubles # min and max broken, copy from infod @@ -67,7 +93,7 @@ def test_type_info(): ld_dict = dbl_dict.copy() ld_dict['width'] = width else: - raise ValueError(f"Unexpected float type {np.longdouble} to test") + raise ValueError(f'Unexpected float type {np.longdouble} to test') assert ld_dict == infod @@ -122,7 +148,7 @@ def 
test_as_int(): except FloatingError: nmant = 63 # Unknown precision, let's hope it's at least 63 v = np.longdouble(2) ** (nmant + 1) - 1 - assert as_int(v) == 2**(nmant + 1) - 1 + assert as_int(v) == 2 ** (nmant + 1) - 1 # Check for predictable overflow nexp64 = floor_log2(type_info(np.float64)['max']) with np.errstate(over='ignore'): @@ -145,7 +171,7 @@ def test_int_to_float(): # IEEEs in this case are binary formats only nexp = floor_log2(type_info(ie3)['max']) # Values too large for the format - smn, smx = -2**(nexp + 1), 2**(nexp + 1) + smn, smx = -(2 ** (nexp + 1)), 2 ** (nexp + 1) if ie3 is np.float64: with pytest.raises(OverflowError): int_to_float(smn, ie3) @@ -165,7 +191,7 @@ def test_int_to_float(): assert int_to_float(-i, LD) == LD(-i) # Above max of float64, we're hosed nexp64 = floor_log2(type_info(np.float64)['max']) - smn64, smx64 = -2**(nexp64 + 1), 2**(nexp64 + 1) + smn64, smx64 = -(2 ** (nexp64 + 1)), 2 ** (nexp64 + 1) # The algorithm here implemented goes through float64, so supermax and # supermin will cause overflow errors with pytest.raises(OverflowError): @@ -177,7 +203,7 @@ def test_int_to_float(): except FloatingError: # don't know where to test return # test we recover precision just above nmant - i = 2**(nmant + 1) - 1 + i = 2 ** (nmant + 1) - 1 assert as_int(int_to_float(i, LD)) == i assert as_int(int_to_float(-i, LD)) == -i # If longdouble can cope with 2**64, test @@ -200,7 +226,7 @@ def test_as_int_np_fix(): def test_floor_exact_16(): # A normal integer can generate an inf in float16 assert floor_exact(2**31, np.float16) == np.inf - assert floor_exact(-2**31, np.float16) == -np.inf + assert floor_exact(-(2**31), np.float16) == -np.inf def test_floor_exact_64(): @@ -212,8 +238,8 @@ def test_floor_exact_64(): assert len(gaps) == 1 gap = gaps.pop() assert gap == int(gap) - test_val = 2**(e + 1) - 1 - assert floor_exact(test_val, np.float64) == 2**(e + 1) - int(gap) + test_val = 2 ** (e + 1) - 1 + assert floor_exact(test_val, np.float64) == 2 ** (e + 1) - int(gap) def test_floor_exact(): @@ -235,8 +261,8 @@ def test_floor_exact(): assert floor_exact(2**5000, t) == np.inf assert ceil_exact(2**5000, t) == np.inf # A number more negative returns -inf - assert floor_exact(-2**5000, t) == -np.inf - assert ceil_exact(-2**5000, t) == -np.inf + assert floor_exact(-(2**5000), t) == -np.inf + assert ceil_exact(-(2**5000), t) == -np.inf # Check around end of integer precision nmant = info['nmant'] for i in range(nmant + 1): @@ -247,16 +273,14 @@ def test_floor_exact(): assert func(-iv, t) == -iv assert func(iv - 1, t) == iv - 1 assert func(-iv + 1, t) == -iv + 1 - if t is np.longdouble and ( - on_powerpc() or - longdouble_precision_improved()): + if t is np.longdouble and (on_powerpc() or longdouble_precision_improved()): # The nmant value for longdouble on PPC appears to be conservative, # so that the tests for behavior above the nmant range fail. # windows longdouble can change from float64 to Intel80 in some # situations, in which case nmant will not be correct continue # Confirm to ourselves that 2**(nmant+1) can't be exactly represented - iv = 2**(nmant + 1) + iv = 2 ** (nmant + 1) assert int_flex(iv + 1, t) == iv assert int_ceex(iv + 1, t) == iv + 2 # negatives @@ -265,8 +289,8 @@ def test_floor_exact(): # The gap in representable numbers is 2 above 2**(nmant+1), 4 above # 2**(nmant+2), and so on. 
for i in range(5): - iv = 2**(nmant + 1 + i) - gap = 2**(i + 1) + iv = 2 ** (nmant + 1 + i) + gap = 2 ** (i + 1) assert as_int(t(iv) + t(gap)) == iv + gap for j in range(1, gap): assert int_flex(iv + j, t) == iv @@ -286,6 +310,8 @@ def test_usable_binary128(): yes = have_binary128() with np.errstate(over='ignore'): exp_test = np.longdouble(2) ** 16383 - assert yes == (exp_test.dtype.itemsize == 16 and - np.isfinite(exp_test) and - _check_nmant(np.longdouble, 112)) + assert yes == ( + exp_test.dtype.itemsize == 16 + and np.isfinite(exp_test) + and _check_nmant(np.longdouble, 112) + ) diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 44266f25fd..e1a7ec9264 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test for image funcs """ +"""Test for image funcs""" import numpy as np @@ -37,9 +37,7 @@ def test_concat(): concat_images([]) # Build combinations of 3D, 4D w/size[3] == 1, and 4D w/size[3] == 3 - all_shapes_5D = ((1, 4, 5, 3, 3), - (7, 3, 1, 4, 5), - (0, 2, 1, 4, 5)) + all_shapes_5D = ((1, 4, 5, 3, 3), (7, 3, 1, 4, 5), (0, 2, 1, 4, 5)) affine = np.eye(4) for dim in range(2, 6): @@ -61,7 +59,7 @@ def test_concat(): img2_mem = Nifti1Image(data1, affine + 1) # bad affine # Loop over every possible axis, including None (explicit and implied) - for axis in (list(range(-(dim - 2), (dim - 1))) + [None, '__default__']): + for axis in list(range(-(dim - 2), (dim - 1))) + [None, '__default__']: # Allow testing default vs. passing explicit param if axis == '__default__': @@ -83,12 +81,12 @@ def test_concat(): # 3D and the same size) fails, so we also # have to expect errors for those. if axis is None: # 3D from here and below - all_data = np.concatenate([data0[..., np.newaxis], - data1[..., np.newaxis]], - **np_concat_kwargs) + all_data = np.concatenate( + [data0[..., np.newaxis], data1[..., np.newaxis]], + **np_concat_kwargs, + ) else: # both 3D, appending on final axis - all_data = np.concatenate([data0, data1], - **np_concat_kwargs) + all_data = np.concatenate([data0, data1], **np_concat_kwargs) expect_error = False except ValueError: # Shapes are not combinable @@ -102,12 +100,13 @@ def test_concat(): imgs_mixed = [imgs[0], img_files[1], imgs[2]] for img0, img1, img2 in (imgs, img_files, imgs_mixed): try: - all_imgs = concat_images([img0, img1], - **concat_imgs_kwargs) + all_imgs = concat_images([img0, img1], **concat_imgs_kwargs) except ValueError as ve: assert expect_error, str(ve) else: - assert not expect_error, "Expected a concatenation error, but got none." + assert ( + not expect_error + ), 'Expected a concatenation error, but got none.' assert_array_equal(all_imgs.get_fdata(), all_data) assert_array_equal(all_imgs.affine, affine) @@ -121,7 +120,9 @@ def test_concat(): except ValueError as ve: assert expect_error, str(ve) else: - assert not expect_error, "Expected a concatenation error, but got none." + assert ( + not expect_error + ), 'Expected a concatenation error, but got none.' 
assert_array_equal(all_imgs.get_fdata(), all_data) assert_array_equal(all_imgs.affine, affine) @@ -176,7 +177,8 @@ def test_closest_canonical(): # an axis swap aff = np.diag([1, 0, 0, 1]) - aff[1, 2] = 1; aff[2, 1] = 1 + aff[1, 2] = 1 + aff[2, 1] = 1 img = Nifti1Image(arr, aff) img.header.set_dim_info(0, 1, 2) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index a12227a894..57a0322cab 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -1,4 +1,4 @@ -""" Validate image API +"""Validate image API What is the image API? @@ -32,13 +32,24 @@ import numpy as np from ..optpkg import optional_package + _, have_scipy, _ = optional_package('scipy') _, have_h5py, _ = optional_package('h5py') -from .. import (AnalyzeImage, Spm99AnalyzeImage, Spm2AnalyzeImage, - Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, - GiftiImage, - MGHImage, Minc1Image, Minc2Image, is_proxy) +from .. import ( + AnalyzeImage, + Spm99AnalyzeImage, + Spm2AnalyzeImage, + Nifti1Pair, + Nifti1Image, + Nifti2Pair, + Nifti2Image, + GiftiImage, + MGHImage, + Minc1Image, + Minc2Image, + is_proxy, +) from ..spatialimages import SpatialImage from .. import minc1, minc2, parrec, brikhead from ..deprecator import ExpiredDeprecationError @@ -47,8 +58,14 @@ import pytest from numpy.testing import assert_almost_equal, assert_array_equal, assert_warns, assert_allclose -from nibabel.testing import (bytesio_round_trip, bytesio_filemap, assert_data_similar, - clear_and_catch_warnings, nullcontext, expires) +from nibabel.testing import ( + bytesio_round_trip, + bytesio_filemap, + assert_data_similar, + clear_and_catch_warnings, + nullcontext, + expires, +) from ..tmpdirs import InTemporaryDirectory from .test_api_validators import ValidateAPI @@ -65,7 +82,8 @@ def maybe_deprecated(meth_name): class GenericImageAPI(ValidateAPI): - """ General image validation API """ + """General image validation API""" + # Whether this image type can do scaling of data has_scaling = False # Whether the image can be saved to disk / file objects @@ -75,7 +93,7 @@ class GenericImageAPI(ValidateAPI): standard_extension = '.img' def obj_params(self): - """ Return generator returning (`img_creator`, `img_params`) tuples + """Return generator returning (`img_creator`, `img_params`) tuples ``img_creator`` is a function taking no arguments and returning a fresh image. 
We need to return this ``img_creator`` function rather than an @@ -150,18 +168,18 @@ def validate_filenames(self, imaker, params): # to_ / from_ filename fname = 'another_image' + self.standard_extension for path in (fname, pathlib.Path(fname)): - with InTemporaryDirectory(): - # Validate that saving or loading a file doesn't use deprecated methods internally - with clear_and_catch_warnings() as w: - warnings.filterwarnings('error', - category=DeprecationWarning, - module=r"nibabel.*") - img.to_filename(path) - rt_img = img.__class__.from_filename(path) - assert_array_equal(img.shape, rt_img.shape) - assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) - assert_almost_equal(np.asanyarray(img.dataobj), np.asanyarray(rt_img.dataobj)) - del rt_img # to allow windows to delete the directory + with InTemporaryDirectory(): + # Validate that saving or loading a file doesn't use deprecated methods internally + with clear_and_catch_warnings() as w: + warnings.filterwarnings( + 'error', category=DeprecationWarning, module=r'nibabel.*' + ) + img.to_filename(path) + rt_img = img.__class__.from_filename(path) + assert_array_equal(img.shape, rt_img.shape) + assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) + assert_almost_equal(np.asanyarray(img.dataobj), np.asanyarray(rt_img.dataobj)) + del rt_img # to allow windows to delete the directory def validate_no_slicing(self, imaker, params): img = imaker() @@ -170,7 +188,7 @@ def validate_no_slicing(self, imaker, params): with pytest.raises(TypeError): img[:] - @expires("5.0.0") + @expires('5.0.0') def validate_get_data_deprecated(self, imaker, params): img = imaker() with pytest.deprecated_call(): @@ -179,7 +197,7 @@ def validate_get_data_deprecated(self, imaker, params): class GetSetDtypeMixin: - """ Adds dtype tests + """Adds dtype tests Add this one if your image has ``get_data_dtype`` and ``set_data_dtype``. """ @@ -204,11 +222,12 @@ def validate_dtype(self, imaker, params): class DataInterfaceMixin(GetSetDtypeMixin): - """ Test dataobj interface for images with array backing + """Test dataobj interface for images with array backing Use this mixin if your image has a ``dataobj`` property that contains an array or an array-like thing. """ + meth_names = ('get_fdata',) def validate_data_interface(self, imaker, params): @@ -343,8 +362,7 @@ def _check_array_interface(self, imaker, meth_name): def _check_array_caching(self, imaker, meth_name, caching): img = imaker() method = getattr(img, meth_name) - get_data_func = (method if caching is None else - partial(method, caching=caching)) + get_data_func = method if caching is None else partial(method, caching=caching) assert isinstance(img.dataobj, np.ndarray) assert img.in_memory with maybe_deprecated(meth_name): @@ -445,7 +463,7 @@ def validate_mmap_parameter(self, imaker, params): class HeaderShapeMixin: - """ Tests that header shape can be set and got + """Tests that header shape can be set and got Add this one of your header supports ``get_data_shape`` and ``set_data_shape``. @@ -463,7 +481,7 @@ def validate_header_shape(self, imaker, params): class AffineMixin: - """ Adds test of affine property, method + """Adds test of affine property, method Add this one if your image has an ``affine`` property. 
""" @@ -498,17 +516,17 @@ def validate_file_stream_equivalence(self, imaker, params): fname = 'img' + self.standard_extension img.to_filename(fname) - with open("stream", "wb") as fobj: + with open('stream', 'wb') as fobj: img.to_stream(fobj) # Check that writing gets us the same thing contents1 = pathlib.Path(fname).read_bytes() - contents2 = pathlib.Path("stream").read_bytes() + contents2 = pathlib.Path('stream').read_bytes() assert contents1 == contents2 # Check that reading gets us the same thing img_a = klass.from_filename(fname) - with open(fname, "rb") as fobj: + with open(fname, 'rb') as fobj: img_b = klass.from_stream(fobj) # This needs to happen while the filehandle is open assert np.array_equal(img_a.get_fdata(), img_b.get_fdata()) @@ -548,9 +566,9 @@ def validate_from_url(self, imaker, params): img = imaker() img_bytes = img.to_bytes() - server.expect_oneshot_request("/img").respond_with_data(img_bytes) - url = server.url_for("/img") - assert url.startswith("http://") # Check we'll trigger an HTTP handler + server.expect_oneshot_request('/img').respond_with_data(img_bytes) + url = server.url_for('/img') + assert url.startswith('http://') # Check we'll trigger an HTTP handler rt_img = img.__class__.from_url(url) assert rt_img.to_bytes() == img_bytes @@ -564,20 +582,20 @@ def validate_from_file_url(self, imaker, params): img = imaker() import uuid + fname = tmp_path / f'img-{uuid.uuid4()}{self.standard_extension}' img.to_filename(fname) - rt_img = img.__class__.from_url(f"file:///{fname}") + rt_img = img.__class__.from_url(f'file:///{fname}') assert self._header_eq(img.header, rt_img.header) assert np.array_equal(img.get_fdata(), rt_img.get_fdata()) del img del rt_img - @staticmethod def _header_eq(header_a, header_b): - """ Header equality check that can be overridden by a subclass of this test + """Header equality check that can be overridden by a subclass of this test This allows us to retain the same tests above when testing an image that uses an abstract class as a header, namely when testing the FileBasedImage API, which @@ -586,11 +604,9 @@ def _header_eq(header_a, header_b): return header_a == header_b -class LoadImageAPI(GenericImageAPI, - DataInterfaceMixin, - AffineMixin, - GetSetDtypeMixin, - HeaderShapeMixin): +class LoadImageAPI( + GenericImageAPI, DataInterfaceMixin, AffineMixin, GetSetDtypeMixin, HeaderShapeMixin +): # Callable returning an image from a filename loader = None # Sequence of dictionaries, where dictionaries have keys @@ -613,8 +629,8 @@ def validate_path_maybe_image(self, imaker, params): class MakeImageAPI(LoadImageAPI): - """ Validation for images we can make with ``func(data, affine, header)`` - """ + """Validation for images we can make with ``func(data, affine, header)``""" + # A callable returning an image from ``image_maker(data, affine, header)`` image_maker = None # A callable returning a header from ``header_maker()`` @@ -635,7 +651,6 @@ def make_imaker(arr, aff, header=None): return lambda: self.image_maker(arr, aff, header) def make_prox_imaker(arr, aff, hdr): - def prox_imaker(): img = self.image_maker(arr, aff, hdr) rt_img = bytesio_round_trip(img) @@ -643,20 +658,14 @@ def prox_imaker(): return prox_imaker - for shape, stored_dtype in product(self.example_shapes, - self.storable_dtypes): + for shape, stored_dtype in product(self.example_shapes, self.storable_dtypes): # To make sure we do not trigger scaling, always use the # stored_dtype for the input array. 
arr = np.arange(np.prod(shape), dtype=stored_dtype).reshape(shape) hdr = self.header_maker() hdr.set_data_dtype(stored_dtype) func = make_imaker(arr.copy(), aff, hdr) - params = dict( - dtype=stored_dtype, - affine=aff, - data=arr, - shape=shape, - is_proxy=False) + params = dict(dtype=stored_dtype, affine=aff, data=arr, shape=shape, is_proxy=False) yield make_imaker(arr.copy(), aff, hdr), params if not self.can_save: continue @@ -667,7 +676,7 @@ def prox_imaker(): class DtypeOverrideMixin(GetSetDtypeMixin): - """ Test images that can accept ``dtype`` arguments to ``__init__`` and + """Test images that can accept ``dtype`` arguments to ``__init__`` and ``to_file_map`` """ @@ -707,8 +716,7 @@ def validate_to_file_dtype_override(self, imaker, params): class ImageHeaderAPI(MakeImageAPI): - """ When ``self.image_maker`` is an image class, make header from class - """ + """When ``self.image_maker`` is an image class, make header from class""" def header_maker(self): return self.image_maker.header_class() @@ -720,8 +728,8 @@ class TestSpatialImageAPI(ImageHeaderAPI): class TestAnalyzeAPI(TestSpatialImageAPI, DtypeOverrideMixin): - """ General image validation API instantiated for Analyze images - """ + """General image validation API instantiated for Analyze images""" + klass = image_maker = AnalyzeImage has_scaling = False can_save = True @@ -766,7 +774,6 @@ class TestMinc1API(ImageHeaderAPI): class TestMinc2API(TestMinc1API): - def setup_method(self): if not have_h5py: raise unittest.SkipTest('Need h5py for these tests') @@ -777,7 +784,6 @@ def setup_method(self): class TestPARRECAPI(LoadImageAPI): - def loader(self, fname): return parrec.load(fname) diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index c23d145a36..13c403285c 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for loader function """ +"""Tests for loader function""" from io import BytesIO import shutil @@ -22,9 +22,20 @@ from .. import spm2analyze as spm2 from .. import nifti1 as ni1 from .. import loadsave as nils -from .. import (Nifti1Image, Nifti1Header, Nifti1Pair, Nifti2Image, Nifti2Pair, - Minc1Image, Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, - AnalyzeImage, MGHImage, all_image_classes) +from .. import ( + Nifti1Image, + Nifti1Header, + Nifti1Pair, + Nifti2Image, + Nifti2Pair, + Minc1Image, + Minc2Image, + Spm2AnalyzeImage, + Spm99AnalyzeImage, + AnalyzeImage, + MGHImage, + all_image_classes, +) from ..tmpdirs import InTemporaryDirectory from ..volumeutils import native_code, swapped_code from ..optpkg import optional_package @@ -47,8 +58,9 @@ def round_trip(img): def test_conversion_spatialimages(caplog): shape = (2, 4, 6) affine = np.diag([1, 2, 3, 1]) - klasses = [klass for klass in all_image_classes - if klass.rw and issubclass(klass, SpatialImage)] + klasses = [ + klass for klass in all_image_classes if klass.rw and issubclass(klass, SpatialImage) + ] for npt in np.float32, np.int16: data = np.arange(np.prod(shape), dtype=npt).reshape(shape) for r_class in klasses: diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index 50142cfc92..fd9927eb00 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -6,19 +6,29 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for is_image / may_contain_header functions """ +"""Tests for is_image / may_contain_header functions""" import copy from os.path import dirname, basename, join as pjoin import numpy as np -from .. import (Nifti1Image, Nifti1Header, Nifti1Pair, - Nifti2Image, Nifti2Header, Nifti2Pair, - AnalyzeImage, AnalyzeHeader, - Minc1Image, Minc2Image, - Spm2AnalyzeImage, Spm99AnalyzeImage, - MGHImage, all_image_classes) +from .. import ( + Nifti1Image, + Nifti1Header, + Nifti1Pair, + Nifti2Image, + Nifti2Header, + Nifti2Pair, + AnalyzeImage, + AnalyzeHeader, + Minc1Image, + Minc2Image, + Spm2AnalyzeImage, + Spm99AnalyzeImage, + MGHImage, + all_image_classes, +) DATA_PATH = pjoin(dirname(__file__), 'data') @@ -35,13 +45,12 @@ def test_sniff_and_guessed_image_type(img_klasses=all_image_classes): # either work, or fail if we're doing bad stuff. # * When the file is a mismatch, the functions should not throw. def test_image_class(img_path, expected_img_klass): - """ Compare an image of one image class to all others. + """Compare an image of one image class to all others. The function should make sure that it loads the image with the expected class, but failing when given a bad sniff (when the sniff is used).""" - def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, - msg): + def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, msg): """Embedded function to do the actual checks expected.""" if sniff_mode == 'no_sniff': @@ -49,8 +58,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, is_img, new_sniff = img_klass.path_maybe_image(img_path) elif sniff_mode in ('empty', 'irrelevant', 'bad_sniff'): # Add img_path to binaryblock sniff parameters - is_img, new_sniff = img_klass.path_maybe_image( - img_path, (sniff, img_path)) + is_img, new_sniff = img_klass.path_maybe_image(img_path, (sniff, img_path)) else: # Pass a sniff, but don't reuse across images. is_img, new_sniff = img_klass.path_maybe_image(img_path, sniff) @@ -58,16 +66,16 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, if expect_success: # Check that the sniff returned is appropriate. new_msg = f'{img_klass.__name__} returned sniff==None ({msg})' - expected_sizeof_hdr = getattr(img_klass.header_class, - 'sizeof_hdr', 0) - current_sizeof_hdr = 0 if new_sniff is None else \ - len(new_sniff[0]) + expected_sizeof_hdr = getattr(img_klass.header_class, 'sizeof_hdr', 0) + current_sizeof_hdr = 0 if new_sniff is None else len(new_sniff[0]) assert current_sizeof_hdr >= expected_sizeof_hdr, new_msg # Check that the image type was recognized. - new_msg = (f"{basename(img_path)} ({msg}) image " - f"is{'' if is_img else ' not'} " - f"a {img_klass.__name__} image.") + new_msg = ( + f'{basename(img_path)} ({msg}) image ' + f"is{'' if is_img else ' not'} " + f'a {img_klass.__name__} image.' 
+ ) assert is_img, new_msg if sniff_mode == 'vanilla': @@ -78,40 +86,45 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, sizeof_hdr = getattr(expected_img_klass.header_class, 'sizeof_hdr', 0) for sniff_mode, sniff in dict( - vanilla=None, # use the sniff of the previous item - no_sniff=None, # Don't pass a sniff - none=None, # pass None as the sniff, should query in fn - empty=b'', # pass an empty sniff, should query in fn - irrelevant=b'a' * (sizeof_hdr - 1), # A too-small sniff, query - bad_sniff=b'a' * sizeof_hdr, # Bad sniff, should fail + vanilla=None, # use the sniff of the previous item + no_sniff=None, # Don't pass a sniff + none=None, # pass None as the sniff, should query in fn + empty=b'', # pass an empty sniff, should query in fn + irrelevant=b'a' * (sizeof_hdr - 1), # A too-small sniff, query + bad_sniff=b'a' * sizeof_hdr, # Bad sniff, should fail ).items(): for klass in img_klasses: if klass == expected_img_klass: # Class will load unless you pass a bad sniff, # or the header ignores the sniff - expect_success = (sniff_mode != 'bad_sniff' or - sizeof_hdr == 0) + expect_success = sniff_mode != 'bad_sniff' or sizeof_hdr == 0 else: expect_success = False # Not sure the relationships # Reuse the sniff... but it will only change for some # sniff_mode values. - msg = (f'{expected_img_klass.__name__}/ {sniff_mode}/ ' - f'{expect_success}') - sniff = check_img(img_path, klass, sniff_mode=sniff_mode, - sniff=sniff, expect_success=expect_success, - msg=msg) + msg = f'{expected_img_klass.__name__}/ {sniff_mode}/ ' f'{expect_success}' + sniff = check_img( + img_path, + klass, + sniff_mode=sniff_mode, + sniff=sniff, + expect_success=expect_success, + msg=msg, + ) # Test whether we can guess the image type from example files - for img_filename, image_klass in [('example4d.nii.gz', Nifti1Image), - ('nifti1.hdr', Nifti1Pair), - ('example_nifti2.nii.gz', Nifti2Image), - ('nifti2.hdr', Nifti2Pair), - ('tiny.mnc', Minc1Image), - ('small.mnc', Minc2Image), - ('test.mgz', MGHImage), - ('analyze.hdr', Spm2AnalyzeImage)]: + for img_filename, image_klass in [ + ('example4d.nii.gz', Nifti1Image), + ('nifti1.hdr', Nifti1Pair), + ('example_nifti2.nii.gz', Nifti2Image), + ('nifti2.hdr', Nifti2Pair), + ('tiny.mnc', Minc1Image), + ('small.mnc', Minc2Image), + ('test.mgz', MGHImage), + ('analyze.hdr', Spm2AnalyzeImage), + ]: # print('Testing: %s %s' % (img_filename, image_klass.__name__)) test_image_class(pjoin(DATA_PATH, img_filename), image_klass) diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 601414e012..472e1c5d63 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -1,4 +1,4 @@ -""" Testing imageclasses module +"""Testing imageclasses module """ from os.path import dirname, join as pjoin diff --git a/nibabel/tests/test_imageglobals.py b/nibabel/tests/test_imageglobals.py index 42cbe6fdce..ac043d192b 100644 --- a/nibabel/tests/test_imageglobals.py +++ b/nibabel/tests/test_imageglobals.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for imageglobals module +"""Tests for imageglobals module """ from .. import imageglobals as igs diff --git a/nibabel/tests/test_imagestats.py b/nibabel/tests/test_imagestats.py index e104013ddd..47dd2ecbd5 100644 --- a/nibabel/tests/test_imagestats.py +++ b/nibabel/tests/test_imagestats.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for image statistics """ +"""Tests for image statistics""" import numpy as np diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index 97f440497e..c227889e59 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -3,57 +3,56 @@ import pytest from unittest import mock -@pytest.mark.parametrize("verbose, v_args", [(-2, ["-qq"]), - (-1, ["-q"]), - (0, []), - (1, ["-v"]), - (2, ["-vv"])]) -@pytest.mark.parametrize("doctests", (True, False)) -@pytest.mark.parametrize("coverage", (True, False)) + +@pytest.mark.parametrize( + 'verbose, v_args', [(-2, ['-qq']), (-1, ['-q']), (0, []), (1, ['-v']), (2, ['-vv'])] +) +@pytest.mark.parametrize('doctests', (True, False)) +@pytest.mark.parametrize('coverage', (True, False)) def test_nibabel_test(verbose, v_args, doctests, coverage): - expected_args = v_args + ["--doctest-modules", "--cov", "nibabel", "--pyargs", "nibabel"] + expected_args = v_args + ['--doctest-modules', '--cov', 'nibabel', '--pyargs', 'nibabel'] if not doctests: - expected_args.remove("--doctest-modules") + expected_args.remove('--doctest-modules') if not coverage: expected_args[-4:-2] = [] - with mock.patch("pytest.main") as pytest_main: + with mock.patch('pytest.main') as pytest_main: nib.test(verbose=verbose, doctests=doctests, coverage=coverage) args, kwargs = pytest_main.call_args assert args == () - assert kwargs == {"args": expected_args} + assert kwargs == {'args': expected_args} def test_nibabel_test_errors(): with pytest.raises(NotImplementedError): - nib.test(label="fast") + nib.test(label='fast') with pytest.raises(NotImplementedError): nib.test(raise_warnings=[]) with pytest.raises(NotImplementedError): nib.test(timer=True) with pytest.raises(ValueError): - nib.test(verbose="-v") + nib.test(verbose='-v') def test_nibabel_bench(): - expected_args = ["-c", "--pyargs", "nibabel"] + expected_args = ['-c', '--pyargs', 'nibabel'] try: - expected_args.insert(1, resource_filename("nibabel", "benchmarks/pytest.benchmark.ini")) + expected_args.insert(1, resource_filename('nibabel', 'benchmarks/pytest.benchmark.ini')) except: - raise unittest.SkipTest("Not installed") + raise unittest.SkipTest('Not installed') - with mock.patch("pytest.main") as pytest_main: + with mock.patch('pytest.main') as pytest_main: nib.bench(verbose=0) args, kwargs = pytest_main.call_args assert args == () - assert kwargs == {"args": expected_args} + assert kwargs == {'args': expected_args} - with mock.patch("pytest.main") as pytest_main: + with mock.patch('pytest.main') as pytest_main: nib.bench(verbose=0, extra_argv=[]) args, kwargs = pytest_main.call_args assert args == () - assert kwargs == {"args": expected_args} + assert kwargs == {'args': expected_args} diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 799952b57d..f8cc168cfd 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -1,4 +1,4 @@ -""" Testing loadsave module +"""Testing loadsave module """ from os.path import dirname, join as pjoin @@ -7,9 +7,14 @@ import numpy as np -from .. import (Spm99AnalyzeImage, Spm2AnalyzeImage, - Nifti1Pair, Nifti1Image, - Nifti2Pair, Nifti2Image) +from .. 
import ( + Spm99AnalyzeImage, + Spm2AnalyzeImage, + Nifti1Pair, + Nifti1Image, + Nifti2Pair, + Nifti2Image, +) from ..loadsave import load, read_img_data, _signature_matches_extension from ..filebasedimages import ImageFileError from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory @@ -17,18 +22,18 @@ from ..testing import expires from ..optpkg import optional_package + _, have_scipy, _ = optional_package('scipy') _, have_pyzstd, _ = optional_package('pyzstd') -from numpy.testing import (assert_almost_equal, - assert_array_equal) +from numpy.testing import assert_almost_equal, assert_array_equal import pytest data_path = pjoin(dirname(__file__), 'data') -@expires("5.0.0") +@expires('5.0.0') def test_read_img_data(): fnames_test = [ 'example4d.nii.gz', @@ -36,7 +41,7 @@ def test_read_img_data(): 'minc1_1_scale.mnc', 'minc1_4d.mnc', 'test.mgz', - 'tiny.mnc' + 'tiny.mnc', ] fnames_test += [pathlib.Path(p) for p in fnames_test] for fname in fnames_test: @@ -78,51 +83,51 @@ def test_load_empty_image(): assert str(err.value).startswith('Empty file: ') -@pytest.mark.parametrize("extension", [".gz", ".bz2", ".zst"]) +@pytest.mark.parametrize('extension', ['.gz', '.bz2', '.zst']) def test_load_bad_compressed_extension(tmp_path, extension): - if extension == ".zst" and not have_pyzstd: + if extension == '.zst' and not have_pyzstd: pytest.skip() - file_path = tmp_path / f"img.nii{extension}" - file_path.write_bytes(b"bad") - with pytest.raises(ImageFileError, match=".*is not a .* file"): + file_path = tmp_path / f'img.nii{extension}' + file_path.write_bytes(b'bad') + with pytest.raises(ImageFileError, match='.*is not a .* file'): load(file_path) -@pytest.mark.parametrize("extension", [".gz", ".bz2", ".zst"]) +@pytest.mark.parametrize('extension', ['.gz', '.bz2', '.zst']) def test_load_good_extension_with_bad_data(tmp_path, extension): - if extension == ".zst" and not have_pyzstd: + if extension == '.zst' and not have_pyzstd: pytest.skip() - file_path = tmp_path / f"img.nii{extension}" - with Opener(file_path, "wb") as fobj: - fobj.write(b"bad") - with pytest.raises(ImageFileError, match="Cannot work out file type of .*"): + file_path = tmp_path / f'img.nii{extension}' + with Opener(file_path, 'wb') as fobj: + fobj.write(b'bad') + with pytest.raises(ImageFileError, match='Cannot work out file type of .*'): load(file_path) def test_signature_matches_extension(tmp_path): - gz_signature = b"\x1f\x8b" - good_file = tmp_path / "good.gz" + gz_signature = b'\x1f\x8b' + good_file = tmp_path / 'good.gz' good_file.write_bytes(gz_signature) - bad_file = tmp_path / "bad.gz" - bad_file.write_bytes(b"bad") - matches, msg = _signature_matches_extension(tmp_path / "uncompressed.nii") + bad_file = tmp_path / 'bad.gz' + bad_file.write_bytes(b'bad') + matches, msg = _signature_matches_extension(tmp_path / 'uncompressed.nii') assert matches - assert msg == "" - matches, msg = _signature_matches_extension(tmp_path / "missing.gz") + assert msg == '' + matches, msg = _signature_matches_extension(tmp_path / 'missing.gz') assert not matches - assert msg.startswith("Could not read") + assert msg.startswith('Could not read') matches, msg = _signature_matches_extension(bad_file) assert not matches - assert "is not a" in msg + assert 'is not a' in msg matches, msg = _signature_matches_extension(good_file) assert matches - assert msg == "" - matches, msg = _signature_matches_extension(tmp_path / "missing.nii") + assert msg == '' + matches, msg = _signature_matches_extension(tmp_path / 'missing.nii') assert matches - 
assert msg == "" + assert msg == '' -@expires("5.0.0") +@expires('5.0.0') def test_read_img_data_nifti(): shape = (2, 3, 4) data = np.random.normal(size=shape) @@ -152,8 +157,7 @@ def test_read_img_data_nifti(): with pytest.deprecated_call(): assert_array_equal(data_back, read_img_data(img_back)) # This is the same as if we loaded the image and header separately - hdr_fname = (img.file_map['header'].filename - if 'header' in img.file_map else img_fname) + hdr_fname = img.file_map['header'].filename if 'header' in img.file_map else img_fname with open(hdr_fname, 'rb') as fobj: hdr_back = img_back.header_class.from_fileobj(fobj) with open(img_fname, 'rb') as fobj: @@ -182,12 +186,10 @@ def test_read_img_data_nifti(): new_inter = 0 # scaled scaling comes from new parameters in header with pytest.deprecated_call(): - assert np.allclose(actual_unscaled * 2.1 + new_inter, - read_img_data(img_back)) + assert np.allclose(actual_unscaled * 2.1 + new_inter, read_img_data(img_back)) # Unscaled array didn't change with pytest.deprecated_call(): - assert_array_equal(actual_unscaled, - read_img_data(img_back, prefer='unscaled')) + assert_array_equal(actual_unscaled, read_img_data(img_back, prefer='unscaled')) # Check the offset too img.header.set_data_offset(1024) # Delete arrays still pointing to file, so Windows can re-use diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 4fecf5782e..4556f76787 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -33,7 +33,7 @@ from . import test_spatialimages as tsi from .test_fileslice import slicer_samples -pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") +pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') EG_FNAME = pjoin(data_path, 'tiny.mnc') @@ -44,62 +44,42 @@ fname=pjoin(data_path, 'tiny.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(2.0, 2.0, 2.0), # These values from SPM2 - data_summary=dict( - min=0.20784314, - max=0.74901961, - mean=0.60602819), - is_proxy=True), + data_summary=dict(min=0.20784314, max=0.74901961, mean=0.60602819), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc1_1_scale.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(2.0, 2.0, 2.0), # These values from mincstats - data_summary=dict( - min=0.2082842439, - max=0.2094327615, - mean=0.2091292083), - is_proxy=True), + data_summary=dict(min=0.2082842439, max=0.2094327615, mean=0.2091292083), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc1_4d.mnc'), shape=(2, 10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(1., 2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(1.0, 2.0, 2.0, 2.0), # These values from mincstats - data_summary=dict( - min=0.2078431373, - max=1.498039216, - mean=0.9090422837), - is_proxy=True), + data_summary=dict(min=0.2078431373, max=1.498039216, mean=0.9090422837), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc1-no-att.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 1.0, 0], - [0, 1.0, 0, 0], - [1.0, 0, 0, 0], - [0, 0, 0, 
1]]), - zooms=(1., 1., 1.), + affine=np.array([[0, 0, 1.0, 0], [0, 1.0, 0, 0], [1.0, 0, 0, 0], [0, 0, 0, 1]]), + zooms=(1.0, 1.0, 1.0), # These values from SPM2/mincstats - data_summary=dict( - min=0.20784314, - max=0.74901961, - mean=0.6061103), - is_proxy=True), + data_summary=dict(min=0.20784314, max=0.74901961, mean=0.6061103), + is_proxy=True, + ), ] @@ -129,14 +109,15 @@ def test_mincfile_slicing(self): mnc_obj = self.opener(tp['fname'], 'r') mnc = self.file_class(mnc_obj) data = mnc.get_scaled_data() - for slicedef in ((slice(None),), - (1,), - (slice(None), 1), - (1, slice(None)), - (slice(None), 1, 1), - (1, slice(None), 1), - (1, 1, slice(None)), - ): + for slicedef in ( + (slice(None),), + (1,), + (slice(None), 1), + (1, slice(None)), + (slice(None), 1, 1), + (1, slice(None), 1), + (1, 1, slice(None)), + ): sliced_data = mnc.get_scaled_data(slicedef) assert_array_equal(sliced_data, data[slicedef]) # Can't close mmapped NetCDF with live mmap arrays @@ -167,14 +148,12 @@ def test_array_proxy_slicing(self): class TestMinc1File(_TestMincFile): - def test_compressed(self): # we can read minc compressed # Not so for MINC2; hence this small sub-class for tp in self.test_files: content = open(tp['fname'], 'rb').read() - openers_exts = [(gzip.open, '.gz'), - (bz2.BZ2File, '.bz2')] + openers_exts = [(gzip.open, '.gz'), (bz2.BZ2File, '.bz2')] if HAVE_ZSTD: # add .zst to test if installed openers_exts += [(pyzstd.ZstdFile, '.zst')] with InTemporaryDirectory(): diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index 1842ca02f9..3e220ef2d1 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -28,80 +28,63 @@ fname=pjoin(data_path, 'small.mnc'), shape=(18, 28, 29), dtype=np.int16, - affine=np.array([[0, 0, 7.0, -98], - [0, 8.0, 0, -134], - [9.0, 0, 0, -72], - [0, 0, 0, 1]]), - zooms=(9., 8., 7.), + affine=np.array([[0, 0, 7.0, -98], [0, 8.0, 0, -134], [9.0, 0, 0, -72], [0, 0, 0, 1]]), + zooms=(9.0, 8.0, 7.0), # These values from mincstats - data_summary=dict( - min=0.1185331417, - max=92.87690699, - mean=31.2127952), - is_proxy=True), + data_summary=dict(min=0.1185331417, max=92.87690699, mean=31.2127952), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc2_1_scale.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(2.0, 2.0, 2.0), # These values from mincstats - data_summary=dict( - min=0.2082842439, - max=0.2094327615, - mean=0.2091292083), - is_proxy=True), + data_summary=dict(min=0.2082842439, max=0.2094327615, mean=0.2091292083), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc2_4d.mnc'), shape=(2, 10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(1., 2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(1.0, 2.0, 2.0, 2.0), # These values from mincstats - data_summary=dict( - min=0.2078431373, - max=1.498039216, - mean=0.9090422837), - is_proxy=True), + data_summary=dict(min=0.2078431373, max=1.498039216, mean=0.9090422837), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc2-no-att.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 1.0, 0], - [0, 1.0, 0, 0], - [1.0, 0, 0, 0], - [0, 0, 0, 1]]), - zooms=(1., 1., 1.), + affine=np.array([[0, 0, 1.0, 0], [0, 1.0, 0, 0], 
[1.0, 0, 0, 0], [0, 0, 0, 1]]), + zooms=(1.0, 1.0, 1.0), # These values from SPM2/mincstats - data_summary=dict( - min=0.20784314, - max=0.74901961, - mean=0.6061103), - is_proxy=True), + data_summary=dict(min=0.20784314, max=0.74901961, mean=0.6061103), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc2-4d-d.mnc'), shape=(5, 16, 16, 16), dtype=np.float64, - affine=np.array([[1., 0., 0., -6.96 ], - [0., 1., 0., -12.453], - [0., 0., 1., -9.48 ], - [0., 0., 0., 1.]]), - zooms=(1., 1., 1., 1.), + affine=np.array( + [ + [1.0, 0.0, 0.0, -6.96], + [0.0, 1.0, 0.0, -12.453], + [0.0, 0.0, 1.0, -9.48], + [0.0, 0.0, 0.0, 1.0], + ] + ), + zooms=(1.0, 1.0, 1.0, 1.0), # These values from mincstats - data_summary=dict( - min=0.0, - max=5.0, - mean=2.00078125), - is_proxy=True), + data_summary=dict(min=0.0, max=5.0, mean=2.00078125), + is_proxy=True, + ), ] if have_h5py: + class TestMinc2File(tm2._TestMincFile): module = minc2 file_class = Minc2File diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index fda6c1f8ec..03fb93cbea 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test we can correctly import example MINC2_PATH files +"""Test we can correctly import example MINC2_PATH files """ import os @@ -18,7 +18,7 @@ from .. import load as top_load, Nifti1Image from ..optpkg import optional_package -from numpy.testing import (assert_array_equal, assert_almost_equal) +from numpy.testing import assert_array_equal, assert_almost_equal h5py, have_h5py, setup_module = optional_package('h5py') @@ -37,30 +37,28 @@ def _make_affine(coses, zooms, starts): class TestEPIFrame: opener = staticmethod(top_load) x_cos = [1, 0, 0] - y_cos = [0., 1, 0] + y_cos = [0.0, 1, 0] z_cos = [0, 0, 1] - zooms = [-0.8984375, -0.8984375, 3.] 
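As a quick check on the anti-diagonal affines used in these expected parameters, a minimal numpy sketch with the 'small.mnc' affine quoted above:

    import numpy as np
    from nibabel.affines import apply_affine

    aff = np.array([[0, 0, 7.0, -98],
                    [0, 8.0, 0, -134],
                    [9.0, 0, 0, -72],
                    [0, 0, 0, 1]])
    # Voxel (0, 0, 0) lands at the translation column ...
    print(apply_affine(aff, [0, 0, 0]))  # [ -98. -134.  -72.]
    # ... and one step along the first voxel axis moves 9 mm along scanner z,
    # which is why the zooms read (9., 8., 7.) even though the axes are permuted.
    print(apply_affine(aff, [1, 0, 0]))  # [ -98. -134.  -63.]
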
+ zooms = [-0.8984375, -0.8984375, 3.0] starts = [117.25609125, 138.89861125, -54.442028] example_params = dict( fname=os.path.join(MINC2_PATH, 'mincex_EPI-frame.mnc'), shape=(40, 256, 256), type=np.int16, - affine=_make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), + affine=_make_affine((z_cos, y_cos, x_cos), zooms[::-1], starts[::-1]), zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats - min=0., + min=0.0, max=1273, - mean=93.52085367) + mean=93.52085367, + ) @needs_nibabel_data('nitest-minc2') def test_load(self): # Check highest level load of minc works img = self.opener(self.example_params['fname']) assert img.shape == self.example_params['shape'] - assert_almost_equal(img.header.get_zooms(), - self.example_params['zooms'], 5) + assert_almost_equal(img.header.get_zooms(), self.example_params['zooms'], 5) assert_almost_equal(img.affine, self.example_params['affine'], 4) assert img.get_data_dtype().type == self.example_params['type'] # Check correspondence of data and recorded shape @@ -77,8 +75,8 @@ def test_load(self): class TestB0(TestEPIFrame): - x_cos = [0.9970527523765, 0., 0.0767190261828617] - y_cos = [0., 1., -6.9388939e-18] + x_cos = [0.9970527523765, 0.0, 0.0767190261828617] + y_cos = [0.0, 1.0, -6.9388939e-18] z_cos = [-0.0767190261828617, 6.9184432614435e-18, 0.9970527523765] zooms = [-0.8984375, -0.8984375, 6.49999990444107] starts = [105.473101260826, 151.74885125, -61.8714747993248] @@ -86,14 +84,13 @@ class TestB0(TestEPIFrame): fname=os.path.join(MINC2_PATH, 'mincex_diff-B0.mnc'), shape=(19, 256, 256), type=np.int16, - affine=_make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), + affine=_make_affine((z_cos, y_cos, x_cos), zooms[::-1], starts[::-1]), zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats min=4.566971917, max=3260.121093, - mean=163.8305553) + mean=163.8305553, + ) class TestFA(TestEPIFrame): @@ -103,28 +100,28 @@ class TestFA(TestEPIFrame): # These values from mincstats min=0.008068881038, max=1.224754546, - mean=0.7520087469) + mean=0.7520087469, + ) example_params.update(new_params) class TestGado(TestEPIFrame): x_cos = [0.999695413509548, -0.0174524064372835, 0.0174497483512505] y_cos = [0.0174497483512505, 0.999847695156391, 0.000304586490452135] - z_cos = [-0.0174524064372835, 0., 0.999847695156391] + z_cos = [-0.0174524064372835, 0.0, 0.999847695156391] zooms = [1, -1, -1] starts = [-75.76775, 115.80462, 81.38605] example_params = dict( fname=os.path.join(MINC2_PATH, 'mincex_gado-contrast.mnc'), shape=(100, 170, 146), type=np.int16, - affine=_make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), + affine=_make_affine((z_cos, y_cos, x_cos), zooms[::-1], starts[::-1]), zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats min=0, max=938668.8698, - mean=128169.3488) + mean=128169.3488, + ) class TestT1(TestEPIFrame): @@ -137,14 +134,13 @@ class TestT1(TestEPIFrame): fname=os.path.join(MINC2_PATH, 'mincex_t1.mnc'), shape=(110, 217, 181), type=np.int16, - affine=_make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), + affine=_make_affine((z_cos, y_cos, x_cos), zooms[::-1], starts[::-1]), zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats min=0, max=100, - mean=23.1659928) + mean=23.1659928, + ) class TestPD(TestEPIFrame): @@ -154,7 +150,8 @@ class TestPD(TestEPIFrame): # These values from mincstats min=0, max=102.5024482, - mean=23.82625718) + mean=23.82625718, + ) example_params.update(new_params) @@ -166,5 +163,6 @@ class 
TestMask(TestEPIFrame): # These values from mincstats min=0, max=1, - mean=0.3817466618) + mean=0.3817466618, + ) example_params.update(new_params) diff --git a/nibabel/tests/test_mriutils.py b/nibabel/tests/test_mriutils.py index 8c6b198c95..082d053805 100644 --- a/nibabel/tests/test_mriutils.py +++ b/nibabel/tests/test_mriutils.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing mriutils module +"""Testing mriutils module """ @@ -20,8 +20,7 @@ def test_calculate_dwell_time(): # Test dwell time calculation # This tests only that the calculation does what it appears to; needs some # external check - assert_almost_equal(calculate_dwell_time(3.3, 2, 3), - 3.3 / (42.576 * 3.4 * 3 * 3)) + assert_almost_equal(calculate_dwell_time(3.3, 2, 3), 3.3 / (42.576 * 3.4 * 3 * 3)) # Echo train length of 1 is valid, but returns 0 dwell time assert_almost_equal(calculate_dwell_time(3.3, 1, 3), 0) with pytest.raises(MRIError): diff --git a/nibabel/tests/test_nibabel_data.py b/nibabel/tests/test_nibabel_data.py index 86e94f5c34..ec97108e35 100644 --- a/nibabel/tests/test_nibabel_data.py +++ b/nibabel/tests/test_nibabel_data.py @@ -1,4 +1,4 @@ -""" Tests for ``get_nibabel_data`` +"""Tests for ``get_nibabel_data`` """ import os diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 63cf13c103..0018dfe842 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for nifti reading package """ +"""Tests for nifti reading package""" import os import warnings import struct @@ -18,10 +18,19 @@ from nibabel.casting import type_info, have_binary128 from nibabel.eulerangles import euler2mat from io import BytesIO -from nibabel.nifti1 import (load, Nifti1Header, Nifti1PairHeader, Nifti1Image, - Nifti1Pair, Nifti1Extension, Nifti1DicomExtension, - Nifti1Extensions, data_type_codes, extension_codes, - slice_order_codes) +from nibabel.nifti1 import ( + load, + Nifti1Header, + Nifti1PairHeader, + Nifti1Image, + Nifti1Pair, + Nifti1Extension, + Nifti1DicomExtension, + Nifti1Extensions, + data_type_codes, + extension_codes, + slice_order_codes, +) from nibabel.spatialimages import HeaderDataError from nibabel.tmpdirs import InTemporaryDirectory from nibabel.optpkg import optional_package @@ -32,8 +41,7 @@ from .test_orientations import ALL_ORNTS from .nibabel_data import get_nibabel_data, needs_nibabel_data -from numpy.testing import (assert_array_equal, assert_array_almost_equal, - assert_almost_equal) +from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_almost_equal from ..testing import ( clear_and_catch_warnings, @@ -41,7 +49,7 @@ runif_extra_has, suppress_warnings, bytesio_filemap, - bytesio_round_trip + bytesio_round_trip, ) import unittest @@ -53,8 +61,8 @@ header_file = os.path.join(data_path, 'nifti1.hdr') image_file = os.path.join(data_path, 'example4d.nii.gz') -pydicom, have_dicom, _ = optional_package("pydicom") -dicom_test = unittest.skipUnless(have_dicom, "Could not import pydicom") +pydicom, have_dicom, _ = optional_package('pydicom') +dicom_test = unittest.skipUnless(have_dicom, 'Could not import pydicom') # Example transformation matrix @@ -70,17 +78,11 @@ class TestNifti1PairHeader(tana.TestAnalyzeHeader, tspm.HeaderScalingMixin): header_class = Nifti1PairHeader example_file = header_file quat_dtype = np.float32 
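The supported_np_types set below describes what Nifti1Header.set_data_dtype will accept; a minimal sketch of that API, using only dtypes listed here:

    import numpy as np
    from nibabel import Nifti1Header
    from nibabel.spatialimages import HeaderDataError

    hdr = Nifti1Header()
    hdr.set_data_dtype(np.uint16)   # a NIfTI-1 extension beyond the Analyze types
    assert hdr.get_data_dtype() == np.dtype(np.uint16)
    try:
        hdr.set_data_dtype(np.void)  # not representable in NIfTI-1
    except HeaderDataError:
        pass
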
- supported_np_types = tana.TestAnalyzeHeader.supported_np_types.union(( - np.int8, - np.uint16, - np.uint32, - np.int64, - np.uint64, - np.complex128)) + supported_np_types = tana.TestAnalyzeHeader.supported_np_types.union( + (np.int8, np.uint16, np.uint32, np.int64, np.uint64, np.complex128) + ) if have_binary128(): - supported_np_types = supported_np_types.union(( - np.longdouble, - np.longcomplex)) + supported_np_types = supported_np_types.union((np.longdouble, np.longcomplex)) tana.add_intp(supported_np_types) def test_empty(self): @@ -145,40 +147,41 @@ def test_slope_inter(self): HDE = HeaderDataError assert hdr.get_slope_inter() == (1.0, 0.0) for in_tup, exp_err, out_tup, raw_values in ( - # Null scalings - ((None, None), None, (None, None), (nan, nan)), - ((nan, None), None, (None, None), (nan, nan)), - ((None, nan), None, (None, None), (nan, nan)), - ((nan, nan), None, (None, None), (nan, nan)), - # Can only be one null - ((None, 0), HDE, (None, None), (nan, 0)), - ((nan, 0), HDE, (None, None), (nan, 0)), - ((1, None), HDE, (None, None), (1, nan)), - ((1, nan), HDE, (None, None), (1, nan)), - # Bad slope plus anything generates an error - ((0, 0), HDE, (None, None), (0, 0)), - ((0, None), HDE, (None, None), (0, nan)), - ((0, nan), HDE, (None, None), (0, nan)), - ((0, inf), HDE, (None, None), (0, inf)), - ((0, minf), HDE, (None, None), (0, minf)), - ((inf, 0), HDE, (None, None), (inf, 0)), - ((inf, None), HDE, (None, None), (inf, nan)), - ((inf, nan), HDE, (None, None), (inf, nan)), - ((inf, inf), HDE, (None, None), (inf, inf)), - ((inf, minf), HDE, (None, None), (inf, minf)), - ((minf, 0), HDE, (None, None), (minf, 0)), - ((minf, None), HDE, (None, None), (minf, nan)), - ((minf, nan), HDE, (None, None), (minf, nan)), - ((minf, inf), HDE, (None, None), (minf, inf)), - ((minf, minf), HDE, (None, None), (minf, minf)), - # Good slope and bad inter generates error for get_slope_inter - ((2, None), HDE, HDE, (2, nan)), - ((2, nan), HDE, HDE, (2, nan)), - ((2, inf), HDE, HDE, (2, inf)), - ((2, minf), HDE, HDE, (2, minf)), - # Good slope and inter - you guessed it - ((2, 0), None, (2, 0), (2, 0)), - ((2, 1), None, (2, 1), (2, 1))): + # Null scalings + ((None, None), None, (None, None), (nan, nan)), + ((nan, None), None, (None, None), (nan, nan)), + ((None, nan), None, (None, None), (nan, nan)), + ((nan, nan), None, (None, None), (nan, nan)), + # Can only be one null + ((None, 0), HDE, (None, None), (nan, 0)), + ((nan, 0), HDE, (None, None), (nan, 0)), + ((1, None), HDE, (None, None), (1, nan)), + ((1, nan), HDE, (None, None), (1, nan)), + # Bad slope plus anything generates an error + ((0, 0), HDE, (None, None), (0, 0)), + ((0, None), HDE, (None, None), (0, nan)), + ((0, nan), HDE, (None, None), (0, nan)), + ((0, inf), HDE, (None, None), (0, inf)), + ((0, minf), HDE, (None, None), (0, minf)), + ((inf, 0), HDE, (None, None), (inf, 0)), + ((inf, None), HDE, (None, None), (inf, nan)), + ((inf, nan), HDE, (None, None), (inf, nan)), + ((inf, inf), HDE, (None, None), (inf, inf)), + ((inf, minf), HDE, (None, None), (inf, minf)), + ((minf, 0), HDE, (None, None), (minf, 0)), + ((minf, None), HDE, (None, None), (minf, nan)), + ((minf, nan), HDE, (None, None), (minf, nan)), + ((minf, inf), HDE, (None, None), (minf, inf)), + ((minf, minf), HDE, (None, None), (minf, minf)), + # Good slope and bad inter generates error for get_slope_inter + ((2, None), HDE, HDE, (2, nan)), + ((2, nan), HDE, HDE, (2, nan)), + ((2, inf), HDE, HDE, (2, inf)), + ((2, minf), HDE, HDE, (2, minf)), + # Good slope and inter - 
you guessed it + ((2, 0), None, (2, 0), (2, 0)), + ((2, 1), None, (2, 1), (2, 1)), + ): hdr = self.header_class() if not exp_err is None: with pytest.raises(exp_err): @@ -250,15 +253,18 @@ def test_magic_offset_checks(self): hdr['magic'] = 'ooh' fhdr, message, raiser = self.log_chk(hdr, 45) assert fhdr['magic'] == b'ooh' - assert (message == - 'magic string "ooh" is not valid; ' - 'leaving as is, but future errors are likely') + assert ( + message == 'magic string "ooh" is not valid; ' + 'leaving as is, but future errors are likely' + ) # For pairs, any offset is OK, but should be divisible by 16 # Singles need offset of at least 352 (nifti1) or 540 (nifti2) bytes, # with the divide by 16 rule svo = hdr.single_vox_offset - for magic, ok, bad_spm in ((hdr.pair_magic, 32, 40), - (hdr.single_magic, svo + 32, svo + 40)): + for magic, ok, bad_spm in ( + (hdr.pair_magic, 32, 40), + (hdr.single_magic, svo + 32, svo + 40), + ): hdr['magic'] = magic hdr['vox_offset'] = 0 self.assert_no_log_err(hdr) @@ -267,18 +273,20 @@ def test_magic_offset_checks(self): hdr['vox_offset'] = bad_spm fhdr, message, raiser = self.log_chk(hdr, 30) assert fhdr['vox_offset'] == bad_spm - assert (message == - f'vox offset (={bad_spm:g}) not divisible by 16, ' - 'not SPM compatible; leaving at current value') + assert ( + message == f'vox offset (={bad_spm:g}) not divisible by 16, ' + 'not SPM compatible; leaving at current value' + ) # Check minimum offset (if offset set) hdr['magic'] = hdr.single_magic hdr['vox_offset'] = 10 fhdr, message, raiser = self.log_chk(hdr, 40) assert fhdr['vox_offset'] == hdr.single_vox_offset - assert (message == - 'vox offset 10 too low for single ' - 'file nifti1; setting to minimum value ' - 'of ' + str(hdr.single_vox_offset)) + assert ( + message == 'vox offset 10 too low for single ' + 'file nifti1; setting to minimum value ' + 'of ' + str(hdr.single_vox_offset) + ) def test_freesurfer_large_vector_hack(self): # For large vector images, Freesurfer appears to set dim[1] to -1 and @@ -360,14 +368,13 @@ def test_freesurfer_ico7_hack(self): pytest.raises(HeaderDataError, hdr.set_data_shape, (1, 1, 1, 163842)) # Test consistency of data in .mgh and mri_convert produced .nii nitest_path = os.path.join(get_nibabel_data(), 'nitest-freesurfer') - mgh = mghload(os.path.join(nitest_path, 'fsaverage', 'surf', - 'lh.orig.avg.area.mgh')) - nii = load(os.path.join(nitest_path, 'derivative', 'fsaverage', 'surf', - 'lh.orig.avg.area.nii')) + mgh = mghload(os.path.join(nitest_path, 'fsaverage', 'surf', 'lh.orig.avg.area.mgh')) + nii = load( + os.path.join(nitest_path, 'derivative', 'fsaverage', 'surf', 'lh.orig.avg.area.nii') + ) assert mgh.shape == nii.shape assert_array_equal(mgh.get_fdata(), nii.get_fdata()) - assert_array_equal(nii.header._structarr['dim'][1:4], - np.array([27307, 1, 6])) + assert_array_equal(nii.header._structarr['dim'][1:4], np.array([27307, 1, 6])) # Test writing produces consistent nii files with InTemporaryDirectory(): nii.to_filename('test.nii') @@ -393,8 +400,7 @@ def test_qform_sform(self): nasty_aff[0, 0] = 1 # Make full rank fixed_aff = unshear_44(nasty_aff) assert not np.allclose(fixed_aff, nasty_aff) - for in_meth, out_meth in ((hdr.set_qform, hdr.get_qform), - (hdr.set_sform, hdr.get_sform)): + for in_meth, out_meth in ((hdr.set_qform, hdr.get_qform), (hdr.set_sform, hdr.get_sform)): in_meth(nice_aff, 2) aff, code = out_meth(coded=True) assert_array_equal(aff, nice_aff) @@ -507,13 +513,14 @@ def test_sform(self): def test_dim_info(self): ehdr = self.header_class() assert 
ehdr.get_dim_info() == (None, None, None) - for info in ((0, 2, 1), - (None, None, None), - (0, 2, None), - (0, None, None), - (None, 2, 1), - (None, None, 1), - ): + for info in ( + (0, 2, 1), + (None, None, None), + (0, 2, None), + (0, None, None), + (None, 2, 1), + (None, None, 1), + ): ehdr.set_dim_info(*info) assert ehdr.get_dim_info() == info @@ -537,27 +544,28 @@ def test_slice_times(self): _print_me = lambda s: list(map(_stringer, s)) # The following examples are from the nifti1.h documentation. hdr['slice_code'] = slice_order_codes['sequential increasing'] - assert (_print_me(hdr.get_slice_times()) == - ['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']) + assert _print_me(hdr.get_slice_times()) == [ + '0.0', + '0.1', + '0.2', + '0.3', + '0.4', + '0.5', + '0.6', + ] hdr['slice_start'] = 1 hdr['slice_end'] = 5 - assert (_print_me(hdr.get_slice_times()) == - [None, '0.0', '0.1', '0.2', '0.3', '0.4', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.0', '0.1', '0.2', '0.3', '0.4', None] hdr['slice_code'] = slice_order_codes['sequential decreasing'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.4', '0.3', '0.2', '0.1', '0.0', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.4', '0.3', '0.2', '0.1', '0.0', None] hdr['slice_code'] = slice_order_codes['alternating increasing'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.0', '0.3', '0.1', '0.4', '0.2', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.0', '0.3', '0.1', '0.4', '0.2', None] hdr['slice_code'] = slice_order_codes['alternating decreasing'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.2', '0.4', '0.1', '0.3', '0.0', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.2', '0.4', '0.1', '0.3', '0.0', None] hdr['slice_code'] = slice_order_codes['alternating increasing 2'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.2', '0.0', '0.3', '0.1', '0.4', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.2', '0.0', '0.3', '0.1', '0.4', None] hdr['slice_code'] = slice_order_codes['alternating decreasing 2'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.4', '0.1', '0.3', '0.0', '0.2', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.4', '0.1', '0.3', '0.0', '0.2', None] # test set hdr = self.header_class() hdr.set_dim_info(slice=2) @@ -583,8 +591,7 @@ def test_slice_times(self): # can't get single slice duration hdr.set_slice_times(funny_times) hdr.set_slice_times(times) - assert (hdr.get_value_label('slice_code') == - 'alternating decreasing') + assert hdr.get_value_label('slice_code') == 'alternating decreasing' assert hdr['slice_start'] == 1 assert hdr['slice_end'] == 5 assert_array_almost_equal(hdr['slice_duration'], 0.1) @@ -605,7 +612,6 @@ def test_slice_times(self): assert len(w) == 1 assert hdr2.get_value_label('slice_code') == 'sequential increasing' - def test_intents(self): ehdr = self.header_class() ehdr.set_intent('t test', (10,), name='some score') @@ -626,8 +632,7 @@ def test_intents(self): ehdr.set_intent('f test', (10,)) # check unset parameters are set to 0, and name to '' ehdr.set_intent('t test') - assert ((ehdr['intent_p1'], ehdr['intent_p2'], ehdr['intent_p3']) == - (0, 0, 0)) + assert (ehdr['intent_p1'], ehdr['intent_p2'], ehdr['intent_p3']) == (0, 0, 0) assert ehdr['intent_name'] == b'' ehdr.set_intent('t test', (10,)) assert (ehdr['intent_p2'], ehdr['intent_p3']) == (0, 0) @@ -647,7 +652,7 @@ def test_intents(self): with pytest.raises(HeaderDataError): ehdr.set_intent(999, (1,), 
allow_unknown=True) with pytest.raises(HeaderDataError): - ehdr.set_intent(999, (1,2), allow_unknown=True) + ehdr.set_intent(999, (1, 2), allow_unknown=True) def test_set_slice_times(self): hdr = self.header_class() @@ -776,7 +781,7 @@ def test_int64_warning(self): img_klass(data, np.eye(4)) # No warnings if we're explicit, though with clear_and_catch_warnings(): - warnings.simplefilter("error") + warnings.simplefilter('error') img_klass(data, np.eye(4), dtype=dtype) hdr = hdr_klass() hdr.set_data_dtype(dtype) @@ -854,8 +859,7 @@ def test_header_update_affine(self): assert hdr['qform_code'] == 2 def test_set_qform(self): - img = self.image_class(np.zeros((2, 3, 4)), - np.diag([2.2, 3.3, 4.3, 1])) + img = self.image_class(np.zeros((2, 3, 4)), np.diag([2.2, 3.3, 4.3, 1])) hdr = img.header new_affine = np.diag([1.1, 1.1, 1.1, 1]) # Affine is same as sform (best affine) @@ -988,7 +992,6 @@ def test_sqform_code_type(self): img.set_sform(None, img.get_sform(coded=True)[1]) img.set_qform(None, img.get_qform(coded=True)[1]) - def test_hdr_diff(self): # Check an offset beyond data does not raise an error img = self.image_class(np.zeros((2, 3, 4)), np.eye(4)) @@ -1019,8 +1022,9 @@ def test_load_save(self): assert isinstance(img3, img.__class__) assert_array_equal(img3.get_fdata(), data) assert img3.header == img.header - assert isinstance(np.asanyarray(img3.dataobj), - np.memmap if ext == '' else np.ndarray) + assert isinstance( + np.asanyarray(img3.dataobj), np.memmap if ext == '' else np.ndarray + ) # del to avoid windows errors of form 'The process cannot # access the file because it is being used' del img3 @@ -1111,40 +1115,41 @@ def _set_raw_scaling(self, hdr, slope, inter): def test_write_scaling(self): # Check we can set slope, inter on write for slope, inter, e_slope, e_inter in ( - (1, 0, 1, 0), - (2, 0, 2, 0), - (2, 1, 2, 1), - (0, 0, 1, 0), - (np.inf, 0, 1, 0)): + (1, 0, 1, 0), + (2, 0, 2, 0), + (2, 1, 2, 1), + (0, 0, 1, 0), + (np.inf, 0, 1, 0), + ): with np.errstate(invalid='ignore'): self._check_write_scaling(slope, inter, e_slope, e_inter) def test_dynamic_dtype_aliases(self): for in_dt, mn, mx, alias, effective_dt in [ - (np.uint8, 0, 255, 'compat', np.uint8), - (np.int8, 0, 127, 'compat', np.uint8), - (np.int8, -128, 127, 'compat', np.int16), - (np.int16, -32768, 32767, 'compat', np.int16), - (np.uint16, 0, 32767, 'compat', np.int16), - (np.uint16, 0, 65535, 'compat', np.int32), - (np.int32, -2**31, 2**31-1, 'compat', np.int32), - (np.uint32, 0, 2**31-1, 'compat', np.int32), - (np.uint32, 0, 2**32-1, 'compat', None), - (np.int64, -2**31, 2**31-1, 'compat', np.int32), - (np.uint64, 0, 2**31-1, 'compat', np.int32), - (np.int64, 0, 2**32-1, 'compat', None), - (np.uint64, 0, 2**32-1, 'compat', None), - (np.float32, 0, 1e30, 'compat', np.float32), - (np.float64, 0, 1e30, 'compat', np.float32), - (np.float64, 0, 1e40, 'compat', None), - (np.int64, 0, 255, 'smallest', np.uint8), - (np.int64, 0, 256, 'smallest', np.int16), - (np.int64, -1, 255, 'smallest', np.int16), - (np.int64, 0, 32768, 'smallest', np.int32), - (np.int64, 0, 4294967296, 'smallest', None), - (np.float32, 0, 1, 'smallest', None), - (np.float64, 0, 1, 'smallest', None) - ]: + (np.uint8, 0, 255, 'compat', np.uint8), + (np.int8, 0, 127, 'compat', np.uint8), + (np.int8, -128, 127, 'compat', np.int16), + (np.int16, -32768, 32767, 'compat', np.int16), + (np.uint16, 0, 32767, 'compat', np.int16), + (np.uint16, 0, 65535, 'compat', np.int32), + (np.int32, -(2**31), 2**31 - 1, 'compat', np.int32), + (np.uint32, 0, 2**31 - 1, 
'compat', np.int32), + (np.uint32, 0, 2**32 - 1, 'compat', None), + (np.int64, -(2**31), 2**31 - 1, 'compat', np.int32), + (np.uint64, 0, 2**31 - 1, 'compat', np.int32), + (np.int64, 0, 2**32 - 1, 'compat', None), + (np.uint64, 0, 2**32 - 1, 'compat', None), + (np.float32, 0, 1e30, 'compat', np.float32), + (np.float64, 0, 1e30, 'compat', np.float32), + (np.float64, 0, 1e40, 'compat', None), + (np.int64, 0, 255, 'smallest', np.uint8), + (np.int64, 0, 256, 'smallest', np.int16), + (np.int64, -1, 255, 'smallest', np.int16), + (np.int64, 0, 32768, 'smallest', np.int32), + (np.int64, 0, 4294967296, 'smallest', None), + (np.float32, 0, 1, 'smallest', None), + (np.float64, 0, 1, 'smallest', None), + ]: arr = np.arange(24, dtype=in_dt).reshape((2, 3, 4)) arr[0, 0, :2] = [mn, mx] img = self.image_class(arr, np.eye(4), dtype=alias) @@ -1167,8 +1172,8 @@ def test_dynamic_dtype_aliases(self): def test_static_dtype_aliases(self): for alias, effective_dt in [ - ("mask", np.uint8), - ]: + ('mask', np.uint8), + ]: for orig_dt in ('u1', 'i8', 'f4'): arr = np.arange(24, dtype=orig_dt).reshape((2, 3, 4)) img = self.image_class(arr, np.eye(4), dtype=alias) @@ -1320,7 +1325,6 @@ def test_nifti_dicom_extension(): assert dcmext.get_content().__class__ == pydicom.dataset.Dataset assert len(dcmext.get_content().values()) == 0 - # use a dataset if provided ds = pydicom.dataset.Dataset() ds.add_new((0x10, 0x20), 'LO', 'NiPy') @@ -1330,9 +1334,9 @@ def test_nifti_dicom_extension(): assert dcmext.get_content().PatientID == 'NiPy' # create a single dicom tag (Patient ID, [0010,0020]) with Explicit VR / LE - dcmbytes_explicit = struct.pack('2H2sH4s', 0x10, 0x20, - 'LO'.encode('utf-8'), 4, - 'NiPy'.encode('utf-8')) + dcmbytes_explicit_be = struct.pack( + '>2H2sH4s', 0x10, 0x20, 'LO'.encode('utf-8'), 4, 'NiPy'.encode('utf-8') + ) hdr_be = Nifti1Header(endianness='>') # Big Endian Nifti1Header dcmext = Nifti1DicomExtension(2, dcmbytes_explicit_be, parent_hdr=hdr_be) assert dcmext.__class__ == Nifti1DicomExtension @@ -1388,11 +1391,12 @@ def test_nifti_dicom_extension(): class TestNifti1General: - """ Test class to test nifti1 in general + """Test class to test nifti1 in general Tests here which mix the pair and the single type, and that should only be run once (not for each type) because they are slow """ + single_class = Nifti1Image pair_class = Nifti1Pair module = nifti1 @@ -1431,7 +1435,7 @@ def test_loadsave_cycle(self): lnim = bytesio_round_trip(wnim) assert lnim.get_data_dtype() == np.int16 # Scaling applied - assert_array_equal(lnim.get_fdata(), data * 2. + 8.) 
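The expected values here follow the standard NIfTI scaling rule — floating point output equals the raw on-disk values times scl_slope plus scl_inter; as a plain numpy sketch:

    import numpy as np

    raw = np.array([0, 1, 2], dtype=np.int16)   # what is stored on disk
    slope, inter = 2.0, 8.0                     # scl_slope, scl_inter
    scaled = raw.astype(np.float64) * slope + inter
    assert np.allclose(scaled, [8.0, 10.0, 12.0])
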
+ assert_array_equal(lnim.get_fdata(), data * 2.0 + 8.0) # slope, inter reset by image creation, but saved in proxy assert lnim.header.get_slope_inter() == (None, None) assert (lnim.dataobj.slope, lnim.dataobj.inter) == (2, 8) @@ -1471,7 +1475,7 @@ def test_float_int_spread(self): # Test rounding error for spread of values # Parallel test to arraywriters powers = np.arange(-10, 10, 0.5) - arr = np.concatenate((-10**powers, 10**powers)) + arr = np.concatenate((-(10**powers), 10**powers)) aff = np.eye(4) for in_dt in (np.float32, np.float64): arr_t = arr.astype(in_dt) @@ -1481,8 +1485,7 @@ def test_float_int_spread(self): arr_back_sc = img_back.get_fdata() slope, inter = img_back.header.get_slope_inter() # Get estimate for error - max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, - inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter) # Simulate allclose test with large atol diff = np.abs(arr_t - arr_back_sc) rdiff = diff / np.abs(arr_t) @@ -1505,8 +1508,7 @@ def test_rt_bias(self): slope, inter = img_back.header.get_slope_inter() bias = np.mean(arr_t - arr_back_sc) # Get estimate for error - max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, - inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter) # Hokey use of max_miss as a std estimate bias_thresh = np.max([max_miss / np.sqrt(count), eps]) assert np.abs(bias) < bias_thresh @@ -1517,16 +1519,17 @@ def test_reoriented_dim_info(self): # Start as RAS aff = np.diag([2, 3, 4, 1]) simg = self.single_class(arr, aff) - for freq, phas, slic in ((0, 1, 2), - (0, 2, 1), - (1, 0, 2), - (2, 0, 1), - (None, None, None), - (0, 2, None), - (0, None, None), - (None, 2, 1), - (None, None, 1), - ): + for freq, phas, slic in ( + (0, 1, 2), + (0, 2, 1), + (1, 0, 2), + (2, 0, 1), + (None, None, None), + (0, 2, None), + (0, None, None), + (None, 2, 1), + (None, None, 1), + ): simg.header.set_dim_info(freq, phas, slic) fdir = 'RAS'[freq] if freq is not None else None pdir = 'RAS'[phas] if phas is not None else None @@ -1545,8 +1548,7 @@ def test_reoriented_dim_info(self): @runif_extra_has('slow') def test_large_nifti1(): image_shape = (91, 109, 91, 1200) - img = Nifti1Image(np.ones(image_shape, dtype=np.float32), - affine=np.eye(4)) + img = Nifti1Image(np.ones(image_shape, dtype=np.float32), affine=np.eye(4)) # Dump and load the large image. with InTemporaryDirectory(): img.to_filename('test.nii.gz') @@ -1554,5 +1556,5 @@ def test_large_nifti1(): data = load('test.nii.gz').get_fdata() # Check that the data are all ones assert image_shape == data.shape - n_ones = np.sum((data == 1.)) + n_ones = np.sum((data == 1.0)) assert np.prod(image_shape) == n_ones diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index 106e3ec787..57a97a1322 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -6,15 +6,14 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for nifti2 reading package """ +"""Tests for nifti2 reading package""" import os import numpy as np from .. import nifti2 -from ..nifti1 import (Nifti1Header, Nifti1PairHeader, Nifti1Extension, - Nifti1Extensions) -from ..nifti2 import (Nifti2Header, Nifti2PairHeader, Nifti2Image, Nifti2Pair) +from ..nifti1 import Nifti1Header, Nifti1PairHeader, Nifti1Extension, Nifti1Extensions +from ..nifti2 import Nifti2Header, Nifti2PairHeader, Nifti2Image, Nifti2Pair from . 
import test_nifti1 as tn1 @@ -52,10 +51,11 @@ def test_eol_check(self): hdr['eol_check'] = (13, 10, 0, 10) fhdr, message, raiser = self.log_chk(hdr, 40) assert_array_equal(fhdr['eol_check'], good_eol) - assert (message == - 'EOL check not 0 or 13, 10, 26, 10; ' - 'data may be corrupted by EOL conversion; ' - 'setting EOL check to 13, 10, 26, 10') + assert ( + message == 'EOL check not 0 or 13, 10, 26, 10; ' + 'data may be corrupted by EOL conversion; ' + 'setting EOL check to 13, 10, 26, 10' + ) class TestNifti2PairHeader(_Nifti2Mixin, tn1.TestNifti1PairHeader): @@ -79,11 +79,12 @@ class TestNifti2Pair(tn1.TestNifti1Pair): class TestNifti2General(tn1.TestNifti1General): - """ Test class to test nifti2 in general + """Test class to test nifti2 in general Tests here which mix the pair and the single type, and that should only be run once (not for each type) because they are slow """ + single_class = Nifti2Image pair_class = Nifti2Pair module = nifti2 @@ -95,12 +96,14 @@ def test_nifti12_conversion(): dtype_type = np.int64 ext1 = Nifti1Extension(6, b'My comment') ext2 = Nifti1Extension(6, b'Fresh comment') - for in_type, out_type in ((Nifti1Header, Nifti2Header), - (Nifti1PairHeader, Nifti2Header), - (Nifti1PairHeader, Nifti2PairHeader), - (Nifti2Header, Nifti1Header), - (Nifti2PairHeader, Nifti1Header), - (Nifti2PairHeader, Nifti1PairHeader)): + for in_type, out_type in ( + (Nifti1Header, Nifti2Header), + (Nifti1PairHeader, Nifti2Header), + (Nifti1PairHeader, Nifti2PairHeader), + (Nifti2Header, Nifti1Header), + (Nifti2PairHeader, Nifti1Header), + (Nifti2PairHeader, Nifti1PairHeader), + ): in_hdr = in_type() in_hdr.set_data_shape(shape) in_hdr.set_data_dtype(dtype_type) diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index c1609980a3..2659b7fbbc 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -6,6 +6,7 @@ @expires('5.0.0') def test_setattr_on_read(): with pytest.deprecated_call(): + class MagicProp: @setattr_on_read def a(self): diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index b25dc2db6d..2a306079f4 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -6,7 +6,7 @@ # copyright and license terms. 
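Most of the tests that follow lean on one behavior of Opener: it chooses a compression wrapper from the filename extension, so calling code stays the same for plain and compressed files. A minimal sketch:

    from nibabel.openers import Opener

    # '.gz' selects a gzip wrapper; 'notes.txt' alone would give a plain file.
    with Opener('notes.txt.gz', 'wb') as fobj:
        fobj.write(b'transparently gzipped')
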
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test for openers module """ +"""Test for openers module""" import os import contextlib from gzip import GzipFile @@ -16,12 +16,13 @@ import time from numpy.compat.py3k import asstr, asbytes -from ..openers import (Opener, - ImageOpener, - HAVE_INDEXED_GZIP, - BZ2File, - DeterministicGzipFile, - ) +from ..openers import ( + Opener, + ImageOpener, + HAVE_INDEXED_GZIP, + BZ2File, + DeterministicGzipFile, +) from ..tmpdirs import InTemporaryDirectory from ..optpkg import optional_package @@ -30,7 +31,7 @@ import pytest from ..deprecator import ExpiredDeprecationError -pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") +pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') class Lunk: @@ -74,16 +75,13 @@ def test_Opener(): def test_Opener_various(): # Check we can do all sorts of files here - message = b"Oh what a giveaway" + message = b'Oh what a giveaway' bz2_fileno = hasattr(BZ2File, 'fileno') if HAVE_INDEXED_GZIP: import indexed_gzip as igzip with InTemporaryDirectory(): sobj = BytesIO() - files_to_test = ['test.txt', - 'test.txt.gz', - 'test.txt.bz2', - sobj] + files_to_test = ['test.txt', 'test.txt.gz', 'test.txt.bz2', sobj] if HAVE_ZSTD: files_to_test += ['test.txt.zst'] for input in files_to_test: @@ -104,8 +102,11 @@ def test_Opener_various(): fobj.fileno() # indexed gzip is used by default, and drops file # handles by default, so we don't have a fileno. - elif input.endswith('gz') and HAVE_INDEXED_GZIP and \ - Version(igzip.__version__) >= Version('0.7.0'): + elif ( + input.endswith('gz') + and HAVE_INDEXED_GZIP + and Version(igzip.__version__) >= Version('0.7.0') + ): with pytest.raises(igzip.NoHandleError): fobj.fileno() else: @@ -127,9 +128,9 @@ def patch_indexed_gzip(state): values = (True, MockIndexedGzipFile) else: values = (False, GzipFile) - with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), \ - mock.patch('nibabel.openers.IndexedGzipFile', values[1], - create=True): + with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), mock.patch( + 'nibabel.openers.IndexedGzipFile', values[1], create=True + ): yield @@ -148,14 +149,14 @@ def test_Opener_gzip_type(): # Each test is specified by a tuple containing: # (indexed_gzip present, Opener kwargs, expected file type) tests = [ - (False, {'mode' : 'rb', 'keep_open' : True}, GzipFile), - (False, {'mode' : 'rb', 'keep_open' : False}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : True}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : False}, GzipFile), - (True, {'mode' : 'rb', 'keep_open' : True}, MockIndexedGzipFile), - (True, {'mode' : 'rb', 'keep_open' : False}, MockIndexedGzipFile), - (True, {'mode' : 'wb', 'keep_open' : True}, GzipFile), - (True, {'mode' : 'wb', 'keep_open' : False}, GzipFile), + (False, {'mode': 'rb', 'keep_open': True}, GzipFile), + (False, {'mode': 'rb', 'keep_open': False}, GzipFile), + (False, {'mode': 'wb', 'keep_open': True}, GzipFile), + (False, {'mode': 'wb', 'keep_open': False}, GzipFile), + (True, {'mode': 'rb', 'keep_open': True}, MockIndexedGzipFile), + (True, {'mode': 'rb', 'keep_open': False}, MockIndexedGzipFile), + (True, {'mode': 'wb', 'keep_open': True}, GzipFile), + (True, {'mode': 'wb', 'keep_open': False}, GzipFile), ] for test in tests: @@ -195,7 +196,7 @@ def file_opener(fileish, mode): def test_file_like_wrapper(): # Test wrapper using BytesIO (full API) - message = b"History of the nude in" + message = b'History of the nude in' sobj = BytesIO() fobj = Opener(sobj) assert 
fobj.tell() == 0 @@ -221,6 +222,7 @@ def test_compressionlevel(): class MyOpener(Opener): default_compresslevel = 5 + with InTemporaryDirectory(): for ext in ('gz', 'bz2', 'GZ', 'gZ', 'BZ2', 'Bz2'): for opener, default_val in ((Opener, 1), (MyOpener, 5)): @@ -245,6 +247,7 @@ def test_compressed_ext_case(): class StrictOpener(Opener): compress_ext_icase = False + exts = ('gz', 'bz2', 'GZ', 'gZ', 'BZ2', 'Bz2') if HAVE_ZSTD: exts += ('zst', 'ZST', 'Zst') @@ -283,11 +286,7 @@ def test_name(): sobj = BytesIO() lunk = Lunk('in ART') with InTemporaryDirectory(): - files_to_test = ['test.txt', - 'test.txt.gz', - 'test.txt.bz2', - sobj, - lunk] + files_to_test = ['test.txt', 'test.txt.gz', 'test.txt.bz2', sobj, lunk] if HAVE_ZSTD: files_to_test += ['test.txt.zst'] for input in files_to_test: @@ -307,6 +306,7 @@ def test_set_extensions(): class MyOpener(Opener): compress_ext_map = Opener.compress_ext_map.copy() compress_ext_map['.glrph'] = Opener.gz_def + with MyOpener('test.glrph', 'w') as fobj: assert hasattr(fobj.fobj, 'compress') @@ -316,11 +316,7 @@ def test_close_if_mine(): with InTemporaryDirectory(): sobj = BytesIO() lunk = Lunk('') - for input in ('test.txt', - 'test.txt.gz', - 'test.txt.bz2', - sobj, - lunk): + for input in ('test.txt', 'test.txt.gz', 'test.txt.bz2', sobj, lunk): fobj = Opener(input, 'wb') # gzip objects have no 'closed' attribute has_closed = hasattr(fobj.fobj, 'closed') @@ -334,18 +330,21 @@ def test_close_if_mine(): def test_iter(): # Check we can iterate over lines, if the underlying file object allows it - lines = \ - """On the + lines = """On the blue ridged mountains of virginia -""".split('\n') +""".split( + '\n' + ) with InTemporaryDirectory(): sobj = BytesIO() - files_to_test = [('test.txt', True), - ('test.txt.gz', False), - ('test.txt.bz2', False), - (sobj, True)] + files_to_test = [ + ('test.txt', True), + ('test.txt.gz', False), + ('test.txt.bz2', False), + (sobj, True), + ] if HAVE_ZSTD: files_to_test += [('test.txt.zst', False)] for input, does_t in files_to_test: @@ -366,7 +365,7 @@ def test_iter(): def md5sum(fname): - with open(fname, "rb") as fobj: + with open(fname, 'rb') as fobj: return hashlib.md5(fobj.read()).hexdigest() @@ -375,82 +374,82 @@ def test_DeterministicGzipFile(): msg = b"Hello, I'd like to have an argument." 
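The checksum comparisons in this test reduce to one gzip detail: the header embeds an MTIME field (and optionally the filename), so identical payloads can still produce different bytes. A stdlib-only sketch:

    import gzip
    import io

    def gz_bytes(mtime):
        buf = io.BytesIO()
        with gzip.GzipFile(filename='', mode='wb', fileobj=buf, mtime=mtime) as gzobj:
            gzobj.write(b'payload')
        return buf.getvalue()

    assert gz_bytes(0) == gz_bytes(0)        # fixed mtime -> identical bytes
    assert gz_bytes(0) != gz_bytes(12345)    # the mtime lands in the gzip header
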
# No filename, no mtime - with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", fileobj=fobj, mtime=0) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', fileobj=fobj, mtime=0) as gzobj: gzobj.write(msg) - anon_chksum = md5sum("ref.gz") + anon_chksum = md5sum('ref.gz') - with DeterministicGzipFile("default.gz", "wb") as fobj: + with DeterministicGzipFile('default.gz', 'wb') as fobj: internal_fobj = fobj.myfileobj fobj.write(msg) # Check that myfileobj is being closed by GzipFile.close() # This is in case GzipFile changes its internal implementation assert internal_fobj.closed - assert md5sum("default.gz") == anon_chksum + assert md5sum('default.gz') == anon_chksum # No filename, current mtime now = time.time() - with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", fileobj=fobj, mtime=now) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', fileobj=fobj, mtime=now) as gzobj: gzobj.write(msg) - now_chksum = md5sum("ref.gz") + now_chksum = md5sum('ref.gz') - with DeterministicGzipFile("now.gz", "wb", mtime=now) as fobj: + with DeterministicGzipFile('now.gz', 'wb', mtime=now) as fobj: fobj.write(msg) - assert md5sum("now.gz") == now_chksum + assert md5sum('now.gz') == now_chksum # Change in default behavior - with mock.patch("time.time") as t: + with mock.patch('time.time') as t: t.return_value = now # GzipFile will use time.time() - with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", fileobj=fobj) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - assert md5sum("ref.gz") == now_chksum + assert md5sum('ref.gz') == now_chksum # DeterministicGzipFile will use 0 - with DeterministicGzipFile("now.gz", "wb") as fobj: + with DeterministicGzipFile('now.gz', 'wb') as fobj: fobj.write(msg) - assert md5sum("now.gz") == anon_chksum + assert md5sum('now.gz') == anon_chksum # GzipFile is filename dependent, DeterministicGzipFile is independent - with GzipFile("filenameA.gz", mode="wb", mtime=0) as gzobj: + with GzipFile('filenameA.gz', mode='wb', mtime=0) as gzobj: gzobj.write(msg) - fnameA_chksum = md5sum("filenameA.gz") + fnameA_chksum = md5sum('filenameA.gz') assert fnameA_chksum != anon_chksum - with DeterministicGzipFile("filenameA.gz", "wb") as fobj: + with DeterministicGzipFile('filenameA.gz', 'wb') as fobj: fobj.write(msg) # But the contents are the same with different filenames - assert md5sum("filenameA.gz") == anon_chksum + assert md5sum('filenameA.gz') == anon_chksum def test_DeterministicGzipFile_fileobj(): with InTemporaryDirectory(): msg = b"Hello, I'd like to have an argument." 
- with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", fileobj=fobj, mtime=0) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', fileobj=fobj, mtime=0) as gzobj: gzobj.write(msg) - ref_chksum = md5sum("ref.gz") + ref_chksum = md5sum('ref.gz') - with open("test.gz", "wb") as fobj: - with DeterministicGzipFile(filename="", mode="wb", fileobj=fobj) as gzobj: + with open('test.gz', 'wb') as fobj: + with DeterministicGzipFile(filename='', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - md5sum("test.gz") == ref_chksum + md5sum('test.gz') == ref_chksum - with open("test.gz", "wb") as fobj: - with DeterministicGzipFile(fileobj=fobj, mode="wb") as gzobj: + with open('test.gz', 'wb') as fobj: + with DeterministicGzipFile(fileobj=fobj, mode='wb') as gzobj: gzobj.write(msg) - md5sum("test.gz") == ref_chksum + md5sum('test.gz') == ref_chksum - with open("test.gz", "wb") as fobj: - with DeterministicGzipFile(filename="test.gz", mode="wb", fileobj=fobj) as gzobj: + with open('test.gz', 'wb') as fobj: + with DeterministicGzipFile(filename='test.gz', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - md5sum("test.gz") == ref_chksum + md5sum('test.gz') == ref_chksum def test_bitwise_determinism(): @@ -458,31 +457,29 @@ def test_bitwise_determinism(): msg = b"Hello, I'd like to have an argument." # Canonical reference: No filename, no mtime # Use default compresslevel - with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", - compresslevel=1, fileobj=fobj, - mtime=0) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', compresslevel=1, fileobj=fobj, mtime=0) as gzobj: gzobj.write(msg) - anon_chksum = md5sum("ref.gz") + anon_chksum = md5sum('ref.gz') # Different times, different filenames now = time.time() - with mock.patch("time.time") as t: + with mock.patch('time.time') as t: t.return_value = now - with Opener("a.gz", "wb") as fobj: + with Opener('a.gz', 'wb') as fobj: fobj.write(msg) t.return_value = now + 1 - with Opener("b.gz", "wb") as fobj: + with Opener('b.gz', 'wb') as fobj: fobj.write(msg) - assert md5sum("a.gz") == anon_chksum - assert md5sum("b.gz") == anon_chksum + assert md5sum('a.gz') == anon_chksum + assert md5sum('b.gz') == anon_chksum # Users can still set mtime, but filenames will not be embedded - with Opener("filenameA.gz", "wb", mtime=0xCAFE10C0) as fobj: + with Opener('filenameA.gz', 'wb', mtime=0xCAFE10C0) as fobj: fobj.write(msg) - with Opener("filenameB.gz", "wb", mtime=0xCAFE10C0) as fobj: + with Opener('filenameB.gz', 'wb', mtime=0xCAFE10C0) as fobj: fobj.write(msg) - fnameA_chksum = md5sum("filenameA.gz") - fnameB_chksum = md5sum("filenameB.gz") + fnameA_chksum = md5sum('filenameA.gz') + fnameB_chksum = md5sum('filenameB.gz') assert fnameA_chksum == fnameB_chksum != anon_chksum diff --git a/nibabel/tests/test_optpkg.py b/nibabel/tests/test_optpkg.py index 72430aea37..875c32bbdf 100644 --- a/nibabel/tests/test_optpkg.py +++ b/nibabel/tests/test_optpkg.py @@ -1,4 +1,4 @@ -""" Testing optpkg module +"""Testing optpkg module """ from unittest import mock @@ -41,12 +41,15 @@ def test_basic(): # Only disrupt imports for "nottriedbefore" package orig_import = builtins.__import__ + def raise_Exception(*args, **kwargs): if args[0] == 'nottriedbefore': raise Exception( - "non ImportError could be thrown by some malfunctioning module " - "upon import, and optional_package should catch it too") + 'non ImportError could be thrown by some malfunctioning module ' + 'upon 
import, and optional_package should catch it too' + ) return orig_import(*args, **kwargs) + with mock.patch.object(builtins, '__import__', side_effect=raise_Exception): assert_bad('nottriedbefore') diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 0b3b8081d0..5d786c0eac 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing for orientations module """ +"""Testing for orientations module""" import numpy as np import warnings @@ -15,68 +15,54 @@ from numpy.testing import assert_array_equal -from ..orientations import (io_orientation, ornt_transform, inv_ornt_aff, - flip_axis, apply_orientation, OrientationError, - ornt2axcodes, axcodes2ornt, aff2axcodes) +from ..orientations import ( + io_orientation, + ornt_transform, + inv_ornt_aff, + flip_axis, + apply_orientation, + OrientationError, + ornt2axcodes, + axcodes2ornt, + aff2axcodes, +) from ..affines import from_matvec, to_matvec from ..testing import expires -IN_ARRS = [np.eye(4), - [[0, 0, 1, 0], - [0, 1, 0, 0], - [1, 0, 0, 0], - [0, 0, 0, 1]], - [[0, 1, 0, 0], - [0, 0, 1, 0], - [1, 0, 0, 0], - [0, 0, 0, 1]], - [[3, 1, 0, 0], - [1, 3, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]], - [[1, 3, 0, 0], - [3, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]], - ] - -OUT_ORNTS = [[[0, 1], - [1, 1], - [2, 1]], - [[2, 1], - [1, 1], - [0, 1]], - [[2, 1], - [0, 1], - [1, 1]], - [[0, 1], - [1, 1], - [2, 1]], - [[1, 1], - [0, 1], - [2, 1]], - ] - -IN_ARRS = IN_ARRS + [[[np.cos(np.pi / 6 + i * np.pi / 2), np.sin(np.pi / 6 + i * np.pi / 2), 0, 0], - [-np.sin(np.pi / 6 + i * np.pi / 2), np.cos(np.pi / 6 + i * np.pi / 2), 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]] for i in range(4)] - -OUT_ORNTS = OUT_ORNTS + [[[0, 1], - [1, 1], - [2, 1]], - [[1, -1], - [0, 1], - [2, 1]], - [[0, -1], - [1, -1], - [2, 1]], - [[1, 1], - [0, -1], - [2, 1]] - ] +IN_ARRS = [ + np.eye(4), + [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]], + [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]], + [[3, 1, 0, 0], [1, 3, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + [[1, 3, 0, 0], [3, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], +] + +OUT_ORNTS = [ + [[0, 1], [1, 1], [2, 1]], + [[2, 1], [1, 1], [0, 1]], + [[2, 1], [0, 1], [1, 1]], + [[0, 1], [1, 1], [2, 1]], + [[1, 1], [0, 1], [2, 1]], +] + +IN_ARRS = IN_ARRS + [ + [ + [np.cos(np.pi / 6 + i * np.pi / 2), np.sin(np.pi / 6 + i * np.pi / 2), 0, 0], + [-np.sin(np.pi / 6 + i * np.pi / 2), np.cos(np.pi / 6 + i * np.pi / 2), 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + for i in range(4) +] + +OUT_ORNTS = OUT_ORNTS + [ + [[0, 1], [1, 1], [2, 1]], + [[1, -1], [0, 1], [2, 1]], + [[0, -1], [1, -1], [2, 1]], + [[1, 1], [0, -1], [2, 1]], +] IN_ARRS = [np.array(arr) for arr in IN_ARRS] @@ -84,15 +70,27 @@ _LABELS = ['RL', 'AP', 'SI'] -ALL_AXCODES = [(_LABELS[i0][j0], _LABELS[i1][j1], _LABELS[i2][j2]) - for i0 in range(3) for i1 in range(3) for i2 in range(3) - if i0 != i1 != i2 != i0 - for j0 in range(2) for j1 in range(2) for j2 in range(2)] - -ALL_ORNTS = [[[i0, j0], [i1, j1], [i2, j2]] - for i0 in range(3) for i1 in range(3) for i2 in range(3) - if i0 != i1 != i2 != i0 - for j0 in [1, -1] for j1 in [1, -1] for j2 in [1, -1]] +ALL_AXCODES = [ + (_LABELS[i0][j0], _LABELS[i1][j1], _LABELS[i2][j2]) + for i0 in range(3) + for i1 in range(3) + for i2 in range(3) + if i0 != i1 != i2 != i0 + for j0 in range(2) + for j1 in range(2) + for j2 in range(2) +] + 
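Aside on the two tables rebuilt here: the ALL_AXCODES comprehension above, and ALL_ORNTS just below, each enumerate the 48 valid combinations, that is 3! = 6 axis orders times 2**3 = 8 direction choices per axis. A standalone sketch of the same enumeration, assuming only the _LABELS convention used in this file (not part of the patch itself):

    from itertools import permutations, product

    _LABELS = ['RL', 'AP', 'SI']
    # 6 axis orders x 8 sign choices = 48 distinct axis-code triples
    axcodes = [
        tuple(_LABELS[ax][sign] for ax, sign in zip(order, signs))
        for order in permutations(range(3))
        for signs in product(range(2), repeat=3)
    ]
    assert len(axcodes) == 48 and len(set(axcodes)) == 48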
+ALL_ORNTS = [ + [[i0, j0], [i1, j1], [i2, j2]] + for i0 in range(3) + for i1 in range(3) + for i2 in range(3) + if i0 != i1 != i2 != i0 + for j0 in [1, -1] + for j1 in [1, -1] + for j2 in [1, -1] +] def same_transform(taff, ornt, shape): @@ -162,32 +160,23 @@ def test_io_orientation(): rzs = np.c_[np.diag([2, 3, 4, 5]), np.zeros((4, 3))] arr = from_matvec(rzs, [15, 16, 17, 18]) ornt = io_orientation(arr) - assert_array_equal(ornt, [[0, 1], - [1, 1], - [2, 1], - [3, 1], - [np.nan, np.nan], - [np.nan, np.nan], - [np.nan, np.nan]]) + assert_array_equal( + ornt, + [[0, 1], [1, 1], [2, 1], [3, 1], [np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], + ) # Test behavior of thresholding - def_aff = np.array([[1., 1, 0, 0], - [0, 0, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]) - fail_tol = np.array([[0, 1], - [np.nan, np.nan], - [2, 1]]) - pass_tol = np.array([[0, 1], - [1, 1], - [2, 1]]) + def_aff = np.array([[1.0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) + fail_tol = np.array([[0, 1], [np.nan, np.nan], [2, 1]]) + pass_tol = np.array([[0, 1], [1, 1], [2, 1]]) eps = np.finfo(float).eps # Test that a Y axis appears as we increase the difference between the # first two columns - for y_val, has_y in ((0, False), - (eps, False), - (eps * 5, False), - (eps * 10, True), - ): + for y_val, has_y in ( + (0, False), + (eps, False), + (eps * 5, False), + (eps * 10, True), + ): def_aff[1, 1] = y_val res = pass_tol if has_y else fail_tol assert_array_equal(io_orientation(def_aff), res) @@ -202,68 +191,50 @@ def test_io_orientation(): aff_extra_col[-1, -1] = 1 # Not strictly necessary, but for completeness aff_extra_col[:3, :3] = mat aff_extra_col[:3, -1] = vec - assert_array_equal(io_orientation(aff_extra_col, tol=1e-5), - [[0, 1], - [np.nan, np.nan], - [2, 1], - [np.nan, np.nan]]) + assert_array_equal( + io_orientation(aff_extra_col, tol=1e-5), + [[0, 1], [np.nan, np.nan], [2, 1], [np.nan, np.nan]], + ) aff_extra_row = np.zeros((5, 4)) aff_extra_row[-1, -1] = 1 # Not strictly necessary, but for completeness aff_extra_row[:3, :3] = mat aff_extra_row[:3, -1] = vec - assert_array_equal(io_orientation(aff_extra_row, tol=1e-5), - [[0, 1], - [np.nan, np.nan], - [2, 1]]) + assert_array_equal(io_orientation(aff_extra_row, tol=1e-5), [[0, 1], [np.nan, np.nan], [2, 1]]) def test_ornt_transform(): - assert_array_equal(ornt_transform([[0, 1], [1, 1], [2, -1]], - [[1, 1], [0, 1], [2, 1]]), - [[1, 1], [0, 1], [2, -1]] - ) - assert_array_equal(ornt_transform([[0, 1], [1, 1], [2, 1]], - [[2, 1], [0, -1], [1, 1]]), - [[1, -1], [2, 1], [0, 1]] - ) + assert_array_equal( + ornt_transform([[0, 1], [1, 1], [2, -1]], [[1, 1], [0, 1], [2, 1]]), + [[1, 1], [0, 1], [2, -1]], + ) + assert_array_equal( + ornt_transform([[0, 1], [1, 1], [2, 1]], [[2, 1], [0, -1], [1, 1]]), + [[1, -1], [2, 1], [0, 1]], + ) # Must have same shape with pytest.raises(ValueError): ornt_transform([[0, 1], [1, 1]], [[0, 1], [1, 1], [2, 1]]) # Must be (N,2) in shape with pytest.raises(ValueError): - ornt_transform([[0, 1, 1], [1, 1, 1]], - [[0, 1, 1], [1, 1, 1]]) + ornt_transform([[0, 1, 1], [1, 1, 1]], [[0, 1, 1], [1, 1, 1]]) # Target axes must exist in source with pytest.raises(ValueError): - ornt_transform([[0, 1], [1, 1], [1, 1]], - [[0, 1], [1, 1], [2, 1]]) + ornt_transform([[0, 1], [1, 1], [1, 1]], [[0, 1], [1, 1], [2, 1]]) def test_ornt2axcodes(): # Recoding orientation to axis codes labels = (('left', 'right'), ('back', 'front'), ('down', 'up')) - assert ornt2axcodes([[0, 1], - [1, 1], - [2, 1]], labels) == ('right', 'front', 'up') - assert 
ornt2axcodes([[0, -1], - [1, -1], - [2, -1]], labels) == ('left', 'back', 'down') - assert ornt2axcodes([[2, -1], - [1, -1], - [0, -1]], labels) == ('down', 'back', 'left') - assert ornt2axcodes([[1, 1], - [2, -1], - [0, 1]], labels) == ('front', 'down', 'right') + assert ornt2axcodes([[0, 1], [1, 1], [2, 1]], labels) == ('right', 'front', 'up') + assert ornt2axcodes([[0, -1], [1, -1], [2, -1]], labels) == ('left', 'back', 'down') + assert ornt2axcodes([[2, -1], [1, -1], [0, -1]], labels) == ('down', 'back', 'left') + assert ornt2axcodes([[1, 1], [2, -1], [0, 1]], labels) == ('front', 'down', 'right') # default is RAS output directions - assert ornt2axcodes([[0, 1], - [1, 1], - [2, 1]]) == ('R', 'A', 'S') + assert ornt2axcodes([[0, 1], [1, 1], [2, 1]]) == ('R', 'A', 'S') # dropped axes produce None - assert ornt2axcodes([[0, 1], - [np.nan, np.nan], - [2, 1]]) == ('R', None, 'S') + assert ornt2axcodes([[0, 1], [np.nan, np.nan], [2, 1]]) == ('R', None, 'S') # Non integer axes raises error with pytest.raises(ValueError): ornt2axcodes([[0.1, 1]]) @@ -278,61 +249,35 @@ def test_ornt2axcodes(): def test_axcodes2ornt(): # Go from axcodes back to orientations labels = (('left', 'right'), ('back', 'front'), ('down', 'up')) - assert_array_equal(axcodes2ornt(('right', 'front', 'up'), labels), - [[0, 1], - [1, 1], - [2, 1]] - ) - assert_array_equal(axcodes2ornt(('left', 'back', 'down'), labels), - [[0, -1], - [1, -1], - [2, -1]] - ) - assert_array_equal(axcodes2ornt(('down', 'back', 'left'), labels), - [[2, -1], - [1, -1], - [0, -1]] - ) - assert_array_equal(axcodes2ornt(('front', 'down', 'right'), labels), - [[1, 1], - [2, -1], - [0, 1]] - ) + assert_array_equal(axcodes2ornt(('right', 'front', 'up'), labels), [[0, 1], [1, 1], [2, 1]]) + assert_array_equal(axcodes2ornt(('left', 'back', 'down'), labels), [[0, -1], [1, -1], [2, -1]]) + assert_array_equal(axcodes2ornt(('down', 'back', 'left'), labels), [[2, -1], [1, -1], [0, -1]]) + assert_array_equal(axcodes2ornt(('front', 'down', 'right'), labels), [[1, 1], [2, -1], [0, 1]]) # default is RAS output directions default = np.c_[range(3), [1] * 3] assert_array_equal(axcodes2ornt(('R', 'A', 'S')), default) # dropped axes produce None - assert_array_equal(axcodes2ornt(('R', None, 'S')), - [[0, 1], - [np.nan, np.nan], - [2, 1]] - ) + assert_array_equal(axcodes2ornt(('R', None, 'S')), [[0, 1], [np.nan, np.nan], [2, 1]]) # Missing axcodes raise an error assert_array_equal(axcodes2ornt('RAS'), default) with pytest.raises(ValueError): axcodes2ornt('rAS') # None is OK as axis code - assert_array_equal(axcodes2ornt(('R', None, 'S')), - [[0, 1], - [np.nan, np.nan], - [2, 1]]) + assert_array_equal(axcodes2ornt(('R', None, 'S')), [[0, 1], [np.nan, np.nan], [2, 1]]) # Bad axis code with None also raises error. 
with pytest.raises(ValueError): axcodes2ornt(('R', None, 's')) # Axis codes checked with custom labels labels = ('SD', 'BF', 'lh') - assert_array_equal(axcodes2ornt('BlD', labels), - [[1, -1], - [2, -1], - [0, 1]]) + assert_array_equal(axcodes2ornt('BlD', labels), [[1, -1], [2, -1], [0, 1]]) with pytest.raises(ValueError): axcodes2ornt('blD', labels) # Duplicate labels - for labels in [('SD', 'BF', 'lD'),('SD', 'SF', 'lD')]: + for labels in [('SD', 'BF', 'lD'), ('SD', 'SF', 'lD')]: with pytest.raises(ValueError): axcodes2ornt('blD', labels) diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 22e805cb8f..0eca2fdca4 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -1,4 +1,4 @@ -""" Testing parrec module +"""Testing parrec module """ from os.path import join as pjoin, dirname, basename @@ -11,18 +11,24 @@ from .. import load as top_load from ..nifti1 import Nifti1Image, Nifti1Extension, Nifti1Header from .. import parrec -from ..parrec import (parse_PAR_header, PARRECHeader, PARRECError, vol_numbers, - vol_is_full, PARRECImage, PARRECArrayProxy, exts2pars) +from ..parrec import ( + parse_PAR_header, + PARRECHeader, + PARRECError, + vol_numbers, + vol_is_full, + PARRECImage, + PARRECArrayProxy, + exts2pars, +) from ..openers import ImageOpener from ..fileholders import FileHolder from ..volumeutils import array_from_file -from numpy.testing import (assert_almost_equal, - assert_array_equal) +from numpy.testing import assert_almost_equal, assert_array_equal import pytest -from ..testing import (clear_and_catch_warnings, suppress_warnings, - assert_arr_dict_equal) +from ..testing import clear_and_catch_warnings, suppress_warnings, assert_arr_dict_equal from .test_arrayproxy import check_mmap from . 
import test_spatialimages as tsi @@ -53,80 +59,120 @@ VARIANT_PAR = pjoin(DATA_PATH, 'variant_v4_2_header.PAR') # Affine as we determined it mid-2014 AN_OLD_AFFINE = np.array( - [[-3.64994708, 0., 1.83564171, 123.66276611], - [0., -3.75, 0., 115.617], - [0.86045705, 0., 7.78655376, -27.91161211], - [0., 0., 0., 1.]]) + [ + [-3.64994708, 0.0, 1.83564171, 123.66276611], + [0.0, -3.75, 0.0, 115.617], + [0.86045705, 0.0, 7.78655376, -27.91161211], + [0.0, 0.0, 0.0, 1.0], + ] +) # Affine from Philips-created NIfTI PHILIPS_AFFINE = np.array( - [[-3.65, -0.0016, 1.8356, 125.4881], - [0.0016, -3.75, -0.0004, 117.4916], - [0.8604, 0.0002, 7.7866, -28.3411], - [0., 0., 0., 1.]]) + [ + [-3.65, -0.0016, 1.8356, 125.4881], + [0.0016, -3.75, -0.0004, 117.4916], + [0.8604, 0.0002, 7.7866, -28.3411], + [0.0, 0.0, 0.0, 1.0], + ] +) # Affines generated by parrec.py from test data in many orientations # Data from http://psydata.ovgu.de/philips_achieva_testfiles/conversion2 PREVIOUS_AFFINES = { - "Phantom_EPI_3mm_cor_20APtrans_15RLrot_SENSE_15_1": - npa([[-3., 0., 0., 118.5], - [0., -0.77645714, -3.18755523, 72.82738377], - [0., -2.89777748, 0.85410285, 97.80720486], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_cor_SENSE_8_1": - npa([[-3., 0., 0., 118.5], - [0., 0., -3.3, 64.35], - [0., -3., 0., 118.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_sag_15AP_SENSE_13_1": - npa([[0., 0.77645714, 3.18755523, -92.82738377], - [-3., 0., 0., 118.5], - [0., -2.89777748, 0.85410285, 97.80720486], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_sag_15FH_SENSE_12_1": - npa([[0.77645714, 0., 3.18755523, -92.82738377], - [-2.89777748, 0., 0.85410285, 97.80720486], - [0., -3., 0., 118.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_sag_15RL_SENSE_11_1": - npa([[0., 0., 3.3, -64.35], - [-2.89777748, -0.77645714, 0., 145.13226726], - [0.77645714, -2.89777748, 0., 83.79215357], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_sag_SENSE_7_1": - npa([[0., 0., 3.3, -64.35], - [-3., 0., 0., 118.5], - [0., -3., 0., 118.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_tra_-30AP_10RL_20FH_SENSE_14_1": - npa([[0., 0., 3.3, -74.35], - [-3., 0., 0., 148.5], - [0., -3., 0., 138.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_tra_15FH_SENSE_9_1": - npa([[0.77645714, 0., 3.18755523, -92.82738377], - [-2.89777748, 0., 0.85410285, 97.80720486], - [0., -3., 0., 118.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_tra_15RL_SENSE_10_1": - npa([[0., 0., 3.3, -64.35], - [-2.89777748, -0.77645714, 0., 145.13226726], - [0.77645714, -2.89777748, 0., 83.79215357], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_tra_SENSE_6_1": - npa([[-3., 0., 0., 118.5], - [0., -3., 0., 118.5], - [0., 0., 3.3, -64.35], - [0., 0., 0., 1.]]), + 'Phantom_EPI_3mm_cor_20APtrans_15RLrot_SENSE_15_1': npa( + [ + [-3.0, 0.0, 0.0, 118.5], + [0.0, -0.77645714, -3.18755523, 72.82738377], + [0.0, -2.89777748, 0.85410285, 97.80720486], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_cor_SENSE_8_1': npa( + [ + [-3.0, 0.0, 0.0, 118.5], + [0.0, 0.0, -3.3, 64.35], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_sag_15AP_SENSE_13_1': npa( + [ + [0.0, 0.77645714, 3.18755523, -92.82738377], + [-3.0, 0.0, 0.0, 118.5], + [0.0, -2.89777748, 0.85410285, 97.80720486], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_sag_15FH_SENSE_12_1': npa( + [ + [0.77645714, 0.0, 3.18755523, -92.82738377], + [-2.89777748, 0.0, 0.85410285, 97.80720486], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_sag_15RL_SENSE_11_1': npa( + [ + [0.0, 0.0, 3.3, -64.35], + [-2.89777748, 
-0.77645714, 0.0, 145.13226726], + [0.77645714, -2.89777748, 0.0, 83.79215357], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_sag_SENSE_7_1': npa( + [ + [0.0, 0.0, 3.3, -64.35], + [-3.0, 0.0, 0.0, 118.5], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_tra_-30AP_10RL_20FH_SENSE_14_1': npa( + [ + [0.0, 0.0, 3.3, -74.35], + [-3.0, 0.0, 0.0, 148.5], + [0.0, -3.0, 0.0, 138.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_tra_15FH_SENSE_9_1': npa( + [ + [0.77645714, 0.0, 3.18755523, -92.82738377], + [-2.89777748, 0.0, 0.85410285, 97.80720486], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_tra_15RL_SENSE_10_1': npa( + [ + [0.0, 0.0, 3.3, -64.35], + [-2.89777748, -0.77645714, 0.0, 145.13226726], + [0.77645714, -2.89777748, 0.0, 83.79215357], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_tra_SENSE_6_1': npa( + [ + [-3.0, 0.0, 0.0, 118.5], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 3.3, -64.35], + [0.0, 0.0, 0.0, 1.0], + ] + ), } # Original values for b values in DTI.PAR, still in PSL orientation -DTI_PAR_BVECS = np.array([[-0.667, -0.667, -0.333], - [-0.333, 0.667, -0.667], - [-0.667, 0.333, 0.667], - [-0.707, -0.000, -0.707], - [-0.707, 0.707, 0.000], - [-0.000, 0.707, 0.707], - [0.000, 0.000, 0.000], - [0.000, 0.000, 0.000]]) +DTI_PAR_BVECS = np.array( + [ + [-0.667, -0.667, -0.333], + [-0.333, 0.667, -0.667], + [-0.667, 0.333, 0.667], + [-0.707, -0.000, -0.707], + [-0.707, 0.707, 0.000], + [-0.000, 0.707, 0.707], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000], + ] +) # DTI.PAR values for bvecs DTI_PAR_BVALS = [1000] * 6 + [0, 1000] @@ -143,11 +189,9 @@ # use our own affine as determined from a previous load in nibabel affine=AN_OLD_AFFINE, zooms=(3.75, 3.75, 8.0, 2.0), - data_summary=dict( - min=0.0, - max=2299.4110643863678, - mean=194.95876256117265), - is_proxy=True) + data_summary=dict(min=0.0, max=2299.4110643863678, mean=194.95876256117265), + is_proxy=True, + ) ] @@ -179,8 +223,7 @@ def test_header(): assert hdr.get_data_dtype() == np.dtype('= ver), \ - "nibabel.info.VERSION does not match latest tag information" + fallback >= ver + ), 'nibabel.info.VERSION does not match latest tag information' def test_cmp_pkg_version_0(): @@ -56,42 +57,44 @@ def test_cmp_pkg_version_0(): assert cmp_pkg_version(stage2, stage1) == 1 -@pytest.mark.parametrize("test_ver, pkg_ver, exp_out", - [ - ('1.0', '1.0', 0), - ('1.0.0', '1.0', 0), - ('1.0', '1.0.0', 0), - ('1.1', '1.1', 0), - ('1.2', '1.1', 1), - ('1.1', '1.2', -1), - ('1.1.1', '1.1.1', 0), - ('1.1.2', '1.1.1', 1), - ('1.1.1', '1.1.2', -1), - ('1.1', '1.1dev', 1), - ('1.1dev', '1.1', -1), - ('1.2.1', '1.2.1rc1', 1), - ('1.2.1rc1', '1.2.1', -1), - ('1.2.1rc1', '1.2.1rc', 1), - ('1.2.1rc', '1.2.1rc1', -1), - ('1.2.1rc1', '1.2.1rc', 1), - ('1.2.1rc', '1.2.1rc1', -1), - ('1.2.1b', '1.2.1a', 1), - ('1.2.1a', '1.2.1b', -1), - ('1.2.0+1', '1.2', 1), - ('1.2', '1.2.0+1', -1), - ('1.2.1+1', '1.2.1', 1), - ('1.2.1', '1.2.1+1', -1), - ('1.2.1rc1+1', '1.2.1', -1), - ('1.2.1', '1.2.1rc1+1', 1), - ('1.2.1rc1+1', '1.2.1+1', -1), - ('1.2.1+1', '1.2.1rc1+1', 1), - ]) +@pytest.mark.parametrize( + 'test_ver, pkg_ver, exp_out', + [ + ('1.0', '1.0', 0), + ('1.0.0', '1.0', 0), + ('1.0', '1.0.0', 0), + ('1.1', '1.1', 0), + ('1.2', '1.1', 1), + ('1.1', '1.2', -1), + ('1.1.1', '1.1.1', 0), + ('1.1.2', '1.1.1', 1), + ('1.1.1', '1.1.2', -1), + ('1.1', '1.1dev', 1), + ('1.1dev', '1.1', -1), + ('1.2.1', '1.2.1rc1', 1), + ('1.2.1rc1', '1.2.1', -1), + ('1.2.1rc1', '1.2.1rc', 1), + ('1.2.1rc', 
'1.2.1rc1', -1), + ('1.2.1rc1', '1.2.1rc', 1), + ('1.2.1rc', '1.2.1rc1', -1), + ('1.2.1b', '1.2.1a', 1), + ('1.2.1a', '1.2.1b', -1), + ('1.2.0+1', '1.2', 1), + ('1.2', '1.2.0+1', -1), + ('1.2.1+1', '1.2.1', 1), + ('1.2.1', '1.2.1+1', -1), + ('1.2.1rc1+1', '1.2.1', -1), + ('1.2.1', '1.2.1rc1+1', 1), + ('1.2.1rc1+1', '1.2.1+1', -1), + ('1.2.1+1', '1.2.1rc1+1', 1), + ], +) def test_cmp_pkg_version_1(test_ver, pkg_ver, exp_out): # Test version comparator assert cmp_pkg_version(test_ver, pkg_ver) == exp_out -@pytest.mark.parametrize("args", [['foo.2'], ['foo.2', '1.0'], ['1.0', 'foo.2'], ['foo']]) +@pytest.mark.parametrize('args', [['foo.2'], ['foo.2', '1.0'], ['1.0', 'foo.2'], ['foo']]) def test_cmp_pkg_version_error(args): with pytest.raises(ValueError): cmp_pkg_version(*args) diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index 3c2a70a8c4..cd7c1830ea 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing processing module +"""Testing processing module """ from os.path import dirname, join as pjoin @@ -16,17 +16,23 @@ import numpy.linalg as npl from nibabel.optpkg import optional_package + spnd, have_scipy, _ = optional_package('scipy.ndimage') import nibabel as nib -from nibabel.processing import (sigma2fwhm, fwhm2sigma, adapt_affine, - resample_from_to, resample_to_output, smooth_image, - conform) +from nibabel.processing import ( + sigma2fwhm, + fwhm2sigma, + adapt_affine, + resample_from_to, + resample_to_output, + smooth_image, + conform, +) from nibabel.nifti1 import Nifti1Image from nibabel.nifti2 import Nifti2Image from nibabel.orientations import aff2axcodes, inv_ornt_aff -from nibabel.affines import (AffineError, from_matvec, to_matvec, apply_affine, - voxel_sizes) +from nibabel.affines import AffineError, from_matvec, to_matvec, apply_affine, voxel_sizes from nibabel.eulerangles import euler2mat from numpy.testing import assert_almost_equal, assert_array_equal @@ -44,9 +50,13 @@ from .test_imageclasses import MINC_3DS, MINC_4DS # Filenames of other images that should work correctly with processing -OTHER_IMGS = ('anatomical.nii', 'functional.nii', - 'example4d.nii.gz', 'example_nifti2.nii.gz', - 'phantom_EPI_asc_CLEAR_2_1.PAR') +OTHER_IMGS = ( + 'anatomical.nii', + 'functional.nii', + 'example4d.nii.gz', + 'example_nifti2.nii.gz', + 'phantom_EPI_asc_CLEAR_2_1.PAR', +) def test_sigma2fwhm(): @@ -68,27 +78,17 @@ def test_adapt_affine(): # For 4x4 affine, 3D image, no-op assert_array_equal(adapt_affine(aff_3d, 3), aff_3d) # For 4x4 affine, 4D image, add extra identity dimension - assert_array_equal(adapt_affine(aff_3d, 4), - [[ 0, 1, 2, 0, 11], - [ 3, 4, 5, 0, 12], - [ 6, 7, 8, 0, 13], - [ 0, 0, 0, 1, 0], - [ 0, 0, 0, 0, 1]]) + assert_array_equal( + adapt_affine(aff_3d, 4), + [[0, 1, 2, 0, 11], [3, 4, 5, 0, 12], [6, 7, 8, 0, 13], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]], + ) # For 5x5 affine, 4D image, identity aff_4d = from_matvec(np.arange(16).reshape((4, 4)), [11, 12, 13, 14]) assert_array_equal(adapt_affine(aff_4d, 4), aff_4d) # For 4x4 affine, 2D image, dropped column - assert_array_equal(adapt_affine(aff_3d, 2), - [[ 0, 1, 11], - [ 3, 4, 12], - [ 6, 7, 13], - [ 0, 0, 1]]) + assert_array_equal(adapt_affine(aff_3d, 2), [[0, 1, 11], [3, 4, 12], [6, 7, 13], [0, 0, 1]]) # For 4x4 affine, 1D image, 2 dropped columns - assert_array_equal(adapt_affine(aff_3d, 1), - [[ 0, 11], - [ 3, 12], - [ 
6, 13], - [ 0, 1]]) + assert_array_equal(adapt_affine(aff_3d, 1), [[0, 11], [3, 12], [6, 13], [0, 1]]) # For 3x3 affine, 2D image, identity aff_2d = from_matvec(np.arange(4).reshape((2, 2)), [11, 12]) assert_array_equal(adapt_affine(aff_2d, 2), aff_2d) @@ -111,8 +111,7 @@ def test_resample_from_to(caplog): ax_flip_ornt = flip_ornt.copy() ax_flip_ornt[axis, 1] = -1 aff_flip_i = inv_ornt_aff(ax_flip_ornt, (2, 3, 4)) - flipped_img = Nifti1Image(np.flip(data, axis), - np.dot(affine, aff_flip_i)) + flipped_img = Nifti1Image(np.flip(data, axis), np.dot(affine, aff_flip_i)) out = resample_from_to(flipped_img, ((2, 3, 4), affine)) assert_almost_equal(img.dataobj, out.dataobj) assert_array_equal(img.affine, out.affine) @@ -255,13 +254,10 @@ def test_resample_to_output(caplog): # Subsample voxels out_img = resample_to_output(Nifti1Image(data, np.diag([4, 5, 6, 1]))) with pytest.warns(UserWarning): # Suppress scipy warning - exp_out = spnd.affine_transform(data, - [1/4, 1/5, 1/6], - output_shape = (5, 11, 19)) + exp_out = spnd.affine_transform(data, [1 / 4, 1 / 5, 1 / 6], output_shape=(5, 11, 19)) assert_array_equal(out_img.dataobj, exp_out) # Unsubsample with voxel sizes - out_img = resample_to_output(Nifti1Image(data, np.diag([4, 5, 6, 1])), - [4, 5, 6]) + out_img = resample_to_output(Nifti1Image(data, np.diag([4, 5, 6, 1])), [4, 5, 6]) assert_array_equal(out_img.dataobj, data) # A rotation to test nearest, order, cval rot_3 = from_matvec(euler2mat(np.pi / 4), [0, 0, 0]) @@ -269,10 +265,9 @@ def test_resample_to_output(caplog): out_img = resample_to_output(rot_3_img) exp_shape = (4, 4, 4) assert out_img.shape == exp_shape - exp_aff = np.array([[1, 0, 0, -2 * np.cos(np.pi / 4)], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]) + exp_aff = np.array( + [[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] + ) assert_almost_equal(out_img.affine, exp_aff) rzs, trans = to_matvec(np.dot(npl.inv(rot_3), exp_aff)) exp_out = spnd.affine_transform(data, rzs, trans, exp_shape) @@ -280,15 +275,18 @@ def test_resample_to_output(caplog): # Order assert_almost_equal( resample_to_output(rot_3_img, order=0).dataobj, - spnd.affine_transform(data, rzs, trans, exp_shape, order=0)) + spnd.affine_transform(data, rzs, trans, exp_shape, order=0), + ) # Cval assert_almost_equal( resample_to_output(rot_3_img, cval=99).dataobj, - spnd.affine_transform(data, rzs, trans, exp_shape, cval=99)) + spnd.affine_transform(data, rzs, trans, exp_shape, cval=99), + ) # Mode assert_almost_equal( resample_to_output(rot_3_img, mode='nearest').dataobj, - spnd.affine_transform(data, rzs, trans, exp_shape, mode='nearest')) + spnd.affine_transform(data, rzs, trans, exp_shape, mode='nearest'), + ) # out_class img_ni1 = Nifti2Image(data, np.eye(4)) img_ni2 = Nifti2Image(data, np.eye(4)) @@ -343,8 +341,7 @@ def test_smooth_image(caplog): exp_out = spnd.gaussian_filter(data, sd, mode='constant') assert_array_equal(smooth_image(img, 8, mode='constant').dataobj, exp_out) exp_out = spnd.gaussian_filter(data, sd, mode='constant', cval=99) - assert_array_equal(smooth_image(img, 8, mode='constant', cval=99).dataobj, - exp_out) + assert_array_equal(smooth_image(img, 8, mode='constant', cval=99).dataobj, exp_out) # out_class img_ni1 = Nifti1Image(data, np.eye(4)) img_ni2 = Nifti2Image(data, np.eye(4)) @@ -383,8 +380,7 @@ def test_spatial_axes_check(caplog): def assert_spm_resampling_close(from_img, our_resampled, spm_resampled): - """ Assert our resampling is close to SPM's, allowing for edge effects - """ + """Assert our resampling 
is close to SPM's, allowing for edge effects""" # To allow for differences in the way SPM and scipy.ndimage handle off-edge # interpolation, mask out voxels off edge to_img_shape = spm_resampled.shape @@ -396,9 +392,9 @@ def assert_spm_resampling_close(from_img, our_resampled, spm_resampled): # Places where SPM may not return default value but scipy.ndimage will (SPM # does not return zeros <0.05 from image edges). # See: https://github.com/nipy/nibabel/pull/255#issuecomment-186774173 - outside_vol = np.any((resamp_coords < 0) | - (np.subtract(resamp_coords, from_img.shape) > -1), - axis=-1) + outside_vol = np.any( + (resamp_coords < 0) | (np.subtract(resamp_coords, from_img.shape) > -1), axis=-1 + ) spm_res = np.where(outside_vol, np.nan, np.array(spm_resampled.dataobj)) assert_allclose_safely(our_resampled.dataobj, spm_res) assert_almost_equal(our_resampled.affine, spm_resampled.affine, 5) @@ -417,12 +413,8 @@ def test_against_spm_resample(): func = nib.load(pjoin(DATA_DIR, 'functional.nii')) some_rotations = euler2mat(0.1, 0.2, 0.3) extra_affine = from_matvec(some_rotations, [3, 4, 5]) - moved_anat = nib.Nifti1Image(anat.get_fdata(), - extra_affine.dot(anat.affine), - anat.header) - one_func = nib.Nifti1Image(func.dataobj[..., 0], - func.affine, - func.header) + moved_anat = nib.Nifti1Image(anat.get_fdata(), extra_affine.dot(anat.affine), anat.header) + one_func = nib.Nifti1Image(func.dataobj[..., 0], func.affine, func.header) moved2func = resample_from_to(moved_anat, one_func, order=1, cval=np.nan) spm_moved = nib.load(pjoin(DATA_DIR, 'resampled_anat_moved.nii')) assert_spm_resampling_close(moved_anat, moved2func, spm_moved) @@ -431,7 +423,7 @@ def test_against_spm_resample(): # John Ashburner). moved2output = resample_to_output(moved_anat, 4, order=1, cval=np.nan) spm2output = nib.load(pjoin(DATA_DIR, 'reoriented_anat_moved.nii')) - assert_spm_resampling_close(moved_anat, moved2output, spm2output); + assert_spm_resampling_close(moved_anat, moved2output, spm2output) @needs_scipy @@ -448,8 +440,13 @@ def test_conform(caplog): # Test with non-default arguments. with caplog.at_level(logging.CRITICAL): # Suppress logs when changing classes - c = conform(anat, out_shape=(100, 100, 200), voxel_size=(2, 2, 1.5), - orientation="LPI", out_class=Nifti2Image) + c = conform( + anat, + out_shape=(100, 100, 200), + voxel_size=(2, 2, 1.5), + orientation='LPI', + out_class=Nifti2Image, + ) assert c.shape == (100, 100, 200) assert c.header.get_zooms() == (2, 2, 1.5) assert c.dataobj.dtype.type == anat.dataobj.dtype.type diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 3b91709964..c2ca1ed27c 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Validate image proxy API +"""Validate image proxy API Minimum array proxy API is: @@ -77,15 +77,13 @@ def _some_slicers(shape): slicers[i, i] = 0 # Add a newaxis to keep us on our toes no_pos = ndim // 2 - slicers = np.hstack((slicers[:, :no_pos], - np.empty((ndim, 1)), - slicers[:, no_pos:])) + slicers = np.hstack((slicers[:, :no_pos], np.empty((ndim, 1)), slicers[:, no_pos:])) slicers[:, no_pos] = None return [tuple(s) for s in slicers] class _TestProxyAPI(ValidateAPI): - """ Base class for testing proxy APIs + """Base class for testing proxy APIs Assumes that real classes will provide an `obj_params` method which is a generator returning 2 tuples of (, ). 
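For context on the hunks that follow: an ``obj_params`` generator yields (<proxy_maker>, <param_dict>) pairs, where the maker is a zero-argument callable returning (proxy, fileobj, header), so that every validator method builds a fresh proxy. A minimal conforming generator, sketched after the ``sio_func`` pattern reformatted below; this is illustrative only, not the test code itself:

    import numpy as np
    from io import BytesIO

    def obj_params(self):
        # Yield (proxy_maker, params); the maker runs inside each check,
        # so no proxy state leaks between validator methods.
        for shape in ((2,), (2, 3), (2, 3, 4)):
            arr = np.arange(np.prod(shape), dtype=np.int16).reshape(shape)
            hdr = self.header_class()
            hdr.set_data_dtype(arr.dtype)
            hdr.set_data_shape(shape)

            def maker(arr=arr, hdr=hdr):
                fio = BytesIO()
                # Proxies read Fortran-ordered data from the file object
                fio.write(arr.tobytes(order='F'))
                # Copy the header so tests cannot mutate shared state
                return self.proxy_class(fio, hdr.copy()), fio, hdr.copy()

            yield maker, dict(shape=shape, dtype=arr.dtype.type, arr=arr.copy())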
@@ -97,6 +95,7 @@ class _TestProxyAPI(ValidateAPI): The <header>
above should support at least "get_data_dtype", "set_data_dtype", "get_data_shape", "set_data_shape" """ + # Flag True if offset can be set into header of image settable_offset = False @@ -203,11 +202,12 @@ def validate_proxy_slicing(self, pmaker, params): class TestAnalyzeProxyAPI(_TestProxyAPI): - """ Specific Analyze-type array proxy API test + """Specific Analyze-type array proxy API test The analyze proxy extends the general API by adding read-only attributes ``slope, inter, offset`` """ + proxy_class = ArrayProxy header_class = AnalyzeHeader shapes = ((2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)) @@ -221,7 +221,7 @@ class TestAnalyzeProxyAPI(_TestProxyAPI): data_endian = '=' def obj_params(self): - """ Iterator returning (``proxy_creator``, ``proxy_params``) pairs + """Iterator returning (``proxy_creator``, ``proxy_params``) pairs Each pair will be tested separately. @@ -240,13 +240,11 @@ def obj_params(self): offsets = (0, 16) # For non-integral parameters, cast to float32 value can be losslessly cast # later, enabling exact checks, then back to float for consistency - slopes = (1., 2., float(np.float32(3.1416))) if self.has_slope else (1.,) - inters = (0., 10., float(np.float32(2.7183))) if self.has_inter else (0.,) - for shape, dtype, offset, slope, inter in product(self.shapes, - self.data_dtypes, - offsets, - slopes, - inters): + slopes = (1.0, 2.0, float(np.float32(3.1416))) if self.has_slope else (1.0,) + inters = (0.0, 10.0, float(np.float32(2.7183))) if self.has_inter else (0.0,) + for shape, dtype, offset, slope, inter in product( + self.shapes, self.data_dtypes, offsets, slopes, inters + ): n_els = np.prod(shape) dtype = np.dtype(dtype).newbyteorder(self.data_endian) arr = np.arange(n_els, dtype=dtype).reshape(shape) @@ -264,9 +262,7 @@ def obj_params(self): # and datatypes of slope, inter hdr.set_slope_inter(slope, inter) s, i = hdr.get_slope_inter() - tmp = apply_read_scaling(arr, - 1. if s is None else s, - 0. if i is None else i) + tmp = apply_read_scaling(arr, 1.0 if s is None else s, 0.0 if i is None else i) dtype_out = tmp.dtype.type def sio_func(): @@ -277,9 +273,7 @@ def sio_func(): # Use a copy of the header to avoid changing # global header in test functions. new_hdr = hdr.copy() - return (self.proxy_class(fio, new_hdr), - fio, - new_hdr) + return (self.proxy_class(fio, new_hdr), fio, new_hdr) params = dict( dtype=dtype, @@ -289,7 +283,8 @@ def sio_func(): shape=shape, offset=offset, slope=slope, - inter=inter) + inter=inter, + ) yield sio_func, params # Same with filenames with InTemporaryDirectory(): @@ -302,9 +297,8 @@ def fname_func(): # Use a copy of the header to avoid changing # global header in test functions. 
new_hdr = hdr.copy() - return (self.proxy_class(fname, new_hdr), - fname, - new_hdr) + return (self.proxy_class(fname, new_hdr), fname, new_hdr) + params = params.copy() yield fname_func, params @@ -339,8 +333,20 @@ class TestSpm2AnalyzeProxyAPI(TestSpm99AnalyzeProxyAPI): class TestNifti1ProxyAPI(TestSpm99AnalyzeProxyAPI): header_class = Nifti1Header has_inter = True - data_dtypes = (np.uint8, np.int16, np.int32, np.float32, np.complex64, np.float64, - np.int8, np.uint16, np.uint32, np.int64, np.uint64, np.complex128) + data_dtypes = ( + np.uint8, + np.int16, + np.int32, + np.float32, + np.complex64, + np.float64, + np.int8, + np.uint16, + np.uint32, + np.int64, + np.uint64, + np.complex128, + ) if have_binary128(): data_dtypes += (np.float128, np.complex256) @@ -366,7 +372,7 @@ def opener(f): return netcdf_file(f, mode='r') def obj_params(self): - """ Iterator returning (``proxy_creator``, ``proxy_params``) pairs + """Iterator returning (``proxy_creator``, ``proxy_params``) pairs Each pair will be tested separately. @@ -378,8 +384,7 @@ def obj_params(self): having an effect on the later tests in the same function. """ eg_path = pjoin(DATA_PATH, self.eg_fname) - arr_out = self.file_class( - self.opener(eg_path)).get_scaled_data() + arr_out = self.file_class(self.opener(eg_path)).get_scaled_data() def eg_func(): mf = self.file_class(self.opener(eg_path)) @@ -387,13 +392,12 @@ def eg_func(): img = self.module.load(eg_path) fobj = open(eg_path, 'rb') return prox, fobj, img.header - yield (eg_func, - dict(shape=self.eg_shape, - dtype_out=np.float64, - arr_out=arr_out)) + + yield (eg_func, dict(shape=self.eg_shape, dtype_out=np.float64, arr_out=arr_out)) if have_h5py: + class TestMinc2API(TestMinc1API): module = minc2 file_class = minc2.Minc2File @@ -420,32 +424,25 @@ def eg_func(): prox = ecat.EcatImageArrayProxy(sh) fobj = open(eg_path, 'rb') return prox, fobj, sh - yield (eg_func, - dict(shape=self.eg_shape, - dtype_out=np.float64, - arr_out=arr_out)) + + yield (eg_func, dict(shape=self.eg_shape, dtype_out=np.float64, arr_out=arr_out)) def validate_header_isolated(self, pmaker, params): raise unittest.SkipTest('ECAT header does not support dtype get') class TestPARRECAPI(_TestProxyAPI): - def _func_dict(self, rec_name): img = parrec.load(rec_name) arr_out = img.get_fdata() def eg_func(): img = parrec.load(rec_name) - prox = parrec.PARRECArrayProxy(rec_name, - img.header, - scaling='dv') + prox = parrec.PARRECArrayProxy(rec_name, img.header, scaling='dv') fobj = open(rec_name, 'rb') return prox, fobj, img.header - return (eg_func, - dict(shape=img.shape, - dtype_out=np.float64, - arr_out=arr_out)) + + return (eg_func, dict(shape=img.shape, dtype_out=np.float64, arr_out=arr_out)) def obj_params(self): yield self._func_dict(EG_REC) diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index fe50fc0199..3dc681f517 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test quaternion calculations """ +"""Test quaternion calculations""" import numpy as np from numpy import pi @@ -99,7 +99,7 @@ def test_inverse_0(): assert iq.dtype.kind == 'f' -@pytest.mark.parametrize("M, q", eg_pairs) +@pytest.mark.parametrize('M, q', eg_pairs) def test_inverse_1(M, q): iq = nq.inverse(q) iqM = nq.quat2mat(iq) @@ -122,15 +122,15 @@ def test_norm(): assert not nq.isunit(qi) -@pytest.mark.parametrize("M1, q1", eg_pairs[0::4]) -@pytest.mark.parametrize("M2, q2", eg_pairs[1::4]) +@pytest.mark.parametrize('M1, q1', eg_pairs[0::4]) +@pytest.mark.parametrize('M2, q2', eg_pairs[1::4]) def test_mult(M1, q1, M2, q2): # Test that quaternion * same as matrix * q21 = nq.mult(q2, q1) assert_array_almost_equal, np.dot(M2, M1), nq.quat2mat(q21) -@pytest.mark.parametrize("M, q", eg_pairs) +@pytest.mark.parametrize('M, q', eg_pairs) def test_inverse(M, q): iq = nq.inverse(q) iqM = nq.quat2mat(iq) @@ -144,15 +144,15 @@ def test_eye(): assert np.allclose(nq.quat2mat(qi), np.eye(3)) -@pytest.mark.parametrize("vec", np.eye(3)) -@pytest.mark.parametrize("M, q", eg_pairs) +@pytest.mark.parametrize('vec', np.eye(3)) +@pytest.mark.parametrize('M, q', eg_pairs) def test_qrotate(vec, M, q): vdash = nq.rotate_vector(vec, q) vM = np.dot(M, vec) assert_array_almost_equal(vdash, vM) -@pytest.mark.parametrize("q", unit_quats) +@pytest.mark.parametrize('q', unit_quats) def test_quaternion_reconstruction(q): # Test reconstruction of arbitrary unit quaternions M = nq.quat2mat(q) @@ -160,7 +160,7 @@ def test_quaternion_reconstruction(q): # Accept positive or negative match posm = np.allclose(q, qt) negm = np.allclose(q, -qt) - assert (posm or negm) + assert posm or negm def test_angle_axis2quat(): diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index 127a7b0704..1d903d6f9f 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests recoder class """ +"""Tests recoder class""" import numpy as np @@ -24,6 +24,7 @@ def test_recoder_1(): with pytest.raises(KeyError): rc.code[3] + def test_recoder_2(): # with explicit name for code codes = ((1,), (2,)) @@ -49,6 +50,7 @@ def test_recoder_3(): with pytest.raises(AttributeError): rc.label + def test_recoder_4(): # with explicit column names codes = ((1, 'one'), (2, 'two')) @@ -86,7 +88,6 @@ def test_recoder_6(): def test_custom_dicter(): # Allow custom dict-like object in constructor class MyDict: - def __init__(self): self._keys = [] @@ -103,6 +104,7 @@ def keys(self): def values(self): return ['funny', 'list'] + # code, label, aliases codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes, map_maker=MyDict) diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index c54a069e55..9300dfa207 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -4,77 +4,106 @@ import pytest MODULE_SCHEDULE = [ - ("5.0.0", ["nibabel.keywordonly", "nibabel.py3k"]), - ("4.0.0", ["nibabel.trackvis"]), - ("3.0.0", ["nibabel.minc", "nibabel.checkwarns"]), + ('5.0.0', ['nibabel.keywordonly', 'nibabel.py3k']), + ('4.0.0', ['nibabel.trackvis']), + ('3.0.0', ['nibabel.minc', 'nibabel.checkwarns']), # Verify that the test will be quiet if the schedule outlives the modules - ("1.0.0", ["nibabel.nosuchmod"]), + ('1.0.0', ['nibabel.nosuchmod']), ] OBJECT_SCHEDULE = [ - ("7.0.0", [("nibabel.gifti.gifti", "GiftiNVPairs"), - ]), - ("6.0.0", [("nibabel.loadsave", "guessed_image_type"), - ("nibabel.loadsave", "read_img_data"), - ("nibabel.orientations", "flip_axis"), - ("nibabel.pydicom_compat", "dicom_test"), - ("nibabel.onetime", "setattr_on_read"), - ]), - ("5.0.0", [("nibabel.gifti.gifti", "data_tag"), - ("nibabel.gifti.giftiio", "read"), - ("nibabel.gifti.giftiio", "write"), - ("nibabel.gifti.parse_gifti_fast", "Outputter"), - ("nibabel.gifti.parse_gifti_fast", "parse_gifti_file"), - ("nibabel.imageclasses", "ext_map"), - ("nibabel.imageclasses", "class_map"), - ("nibabel.loadsave", "which_analyze_type"), - ("nibabel.volumeutils", "BinOpener"), - ("nibabel.volumeutils", "allopen"), - ("nibabel.orientations", "orientation_affine"), - ("nibabel.spatialimages", "Header"), - ]), - ("4.0.0", [("nibabel.minc1", "MincFile"), - ("nibabel.minc1", "MincImage")]), - ("3.0.0", [("nibabel.testing", "catch_warn_reset")]), + ( + '7.0.0', + [ + ('nibabel.gifti.gifti', 'GiftiNVPairs'), + ], + ), + ( + '6.0.0', + [ + ('nibabel.loadsave', 'guessed_image_type'), + ('nibabel.loadsave', 'read_img_data'), + ('nibabel.orientations', 'flip_axis'), + ('nibabel.pydicom_compat', 'dicom_test'), + ('nibabel.onetime', 'setattr_on_read'), + ], + ), + ( + '5.0.0', + [ + ('nibabel.gifti.gifti', 'data_tag'), + ('nibabel.gifti.giftiio', 'read'), + ('nibabel.gifti.giftiio', 'write'), + ('nibabel.gifti.parse_gifti_fast', 'Outputter'), + ('nibabel.gifti.parse_gifti_fast', 'parse_gifti_file'), + ('nibabel.imageclasses', 'ext_map'), + ('nibabel.imageclasses', 'class_map'), + ('nibabel.loadsave', 'which_analyze_type'), + ('nibabel.volumeutils', 'BinOpener'), + ('nibabel.volumeutils', 'allopen'), + ('nibabel.orientations', 'orientation_affine'), + ('nibabel.spatialimages', 'Header'), + ], + ), + ('4.0.0', [('nibabel.minc1', 'MincFile'), ('nibabel.minc1', 'MincImage')]), + ('3.0.0', [('nibabel.testing', 'catch_warn_reset')]), # Verify that the test will be quiet if the 
schedule outlives the modules - ("1.0.0", [("nibabel.nosuchmod", "anyobj"), ("nibabel.nifti1", "nosuchobj")]), + ('1.0.0', [('nibabel.nosuchmod', 'anyobj'), ('nibabel.nifti1', 'nosuchobj')]), ] ATTRIBUTE_SCHEDULE = [ - ("7.0.0", [("nibabel.gifti.gifti", "GiftiMetaData", "from_dict"), - ("nibabel.gifti.gifti", "GiftiMetaData", "metadata"), - ("nibabel.gifti.gifti", "GiftiMetaData", "data"), - ]), - ("5.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_data"), - ("nibabel.freesurfer.mghformat", "MGHHeader", "_header_data"), - ("nibabel.gifti.gifti", "GiftiDataArray", "from_array"), - ("nibabel.gifti.gifti", "GiftiDataArray", "to_xml_open"), - ("nibabel.gifti.gifti", "GiftiDataArray", "to_xml_close"), - ("nibabel.gifti.gifti", "GiftiDataArray", "get_metadata"), - ("nibabel.gifti.gifti", "GiftiImage", "get_labeltable"), - ("nibabel.gifti.gifti", "GiftiImage", "set_labeltable"), - ("nibabel.gifti.gifti", "GiftiImage", "get_metadata"), - ("nibabel.gifti.gifti", "GiftiImage", "set_metadata"), - ("nibabel.gifti.gifti", "GiftiImage", "getArraysFromIntent"), - ("nibabel.gifti.gifti", "GiftiMetaData", "get_metadata"), - ("nibabel.gifti.gifti", "GiftiLabel", "get_rgba"), - ("nibabel.nicom.dicomwrappers", "Wrapper", "get_affine"), - ("nibabel.streamlines.array_sequence", "ArraySequence", "data"), - ("nibabel.ecat", "EcatImage", "from_filespec"), - ("nibabel.filebasedimages", "FileBasedImage", "get_header"), - ("nibabel.spatialimages", "SpatialImage", "get_affine"), - ("nibabel.arraywriters", "ArrayWriter", "_check_nan2zero"), - ]), - ("4.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_shape"), - ("nibabel.filebasedimages", "FileBasedImage", "filespec_to_files"), - ("nibabel.filebasedimages", "FileBasedImage", "to_filespec"), - ("nibabel.filebasedimages", "FileBasedImage", "to_files"), - ("nibabel.filebasedimages", "FileBasedImage", "from_files"), - ("nibabel.arrayproxy", "ArrayProxy", "header")]), + ( + '7.0.0', + [ + ('nibabel.gifti.gifti', 'GiftiMetaData', 'from_dict'), + ('nibabel.gifti.gifti', 'GiftiMetaData', 'metadata'), + ('nibabel.gifti.gifti', 'GiftiMetaData', 'data'), + ], + ), + ( + '5.0.0', + [ + ('nibabel.dataobj_images', 'DataobjImage', 'get_data'), + ('nibabel.freesurfer.mghformat', 'MGHHeader', '_header_data'), + ('nibabel.gifti.gifti', 'GiftiDataArray', 'from_array'), + ('nibabel.gifti.gifti', 'GiftiDataArray', 'to_xml_open'), + ('nibabel.gifti.gifti', 'GiftiDataArray', 'to_xml_close'), + ('nibabel.gifti.gifti', 'GiftiDataArray', 'get_metadata'), + ('nibabel.gifti.gifti', 'GiftiImage', 'get_labeltable'), + ('nibabel.gifti.gifti', 'GiftiImage', 'set_labeltable'), + ('nibabel.gifti.gifti', 'GiftiImage', 'get_metadata'), + ('nibabel.gifti.gifti', 'GiftiImage', 'set_metadata'), + ('nibabel.gifti.gifti', 'GiftiImage', 'getArraysFromIntent'), + ('nibabel.gifti.gifti', 'GiftiMetaData', 'get_metadata'), + ('nibabel.gifti.gifti', 'GiftiLabel', 'get_rgba'), + ('nibabel.nicom.dicomwrappers', 'Wrapper', 'get_affine'), + ('nibabel.streamlines.array_sequence', 'ArraySequence', 'data'), + ('nibabel.ecat', 'EcatImage', 'from_filespec'), + ('nibabel.filebasedimages', 'FileBasedImage', 'get_header'), + ('nibabel.spatialimages', 'SpatialImage', 'get_affine'), + ('nibabel.arraywriters', 'ArrayWriter', '_check_nan2zero'), + ], + ), + ( + '4.0.0', + [ + ('nibabel.dataobj_images', 'DataobjImage', 'get_shape'), + ('nibabel.filebasedimages', 'FileBasedImage', 'filespec_to_files'), + ('nibabel.filebasedimages', 'FileBasedImage', 'to_filespec'), + ('nibabel.filebasedimages', 'FileBasedImage', 
'to_files'), + ('nibabel.filebasedimages', 'FileBasedImage', 'from_files'), + ('nibabel.arrayproxy', 'ArrayProxy', 'header'), + ], + ), # Verify that the test will be quiet if the schedule outlives the modules - ("1.0.0", [("nibabel.nosuchmod", "anyobj", "anyattr"), - ("nibabel.nifti1", "nosuchobj", "anyattr"), - ("nibabel.nifti1", "Nifti1Image", "nosuchattr")]), + ( + '1.0.0', + [ + ('nibabel.nosuchmod', 'anyobj', 'anyattr'), + ('nibabel.nifti1', 'nosuchobj', 'anyattr'), + ('nibabel.nifti1', 'Nifti1Image', 'nosuchattr'), + ], + ), ] @@ -86,7 +115,7 @@ def test_module_removal(): for module in _filter(MODULE_SCHEDULE): with pytest.raises(ImportError): __import__(module) - assert False, f"Time to remove {module}" + assert False, f'Time to remove {module}' def test_object_removal(): @@ -95,7 +124,7 @@ def test_object_removal(): module = __import__(module_name) except ImportError: continue - assert not hasattr(module, obj), f"Time to remove {module_name}.{obj}" + assert not hasattr(module, obj), f'Time to remove {module_name}.{obj}' def test_attribute_removal(): @@ -108,29 +137,29 @@ def test_attribute_removal(): klass = getattr(module, cls) except AttributeError: continue - assert not hasattr(klass, attr), f"Time to remove {module_name}.{cls}.{attr}" + assert not hasattr(klass, attr), f'Time to remove {module_name}.{cls}.{attr}' # # Test the tests, making sure that we will get errors when the time comes # -_sched = "nibabel.tests.test_removalschedule.{}_SCHEDULE".format +_sched = 'nibabel.tests.test_removalschedule.{}_SCHEDULE'.format -@mock.patch(_sched("MODULE"), [("3.0.0", ["nibabel.nifti1"])]) +@mock.patch(_sched('MODULE'), [('3.0.0', ['nibabel.nifti1'])]) def test_unremoved_module(): with pytest.raises(AssertionError): test_module_removal() -@mock.patch(_sched("OBJECT"), [("3.0.0", [("nibabel.nifti1", "Nifti1Image")])]) +@mock.patch(_sched('OBJECT'), [('3.0.0', [('nibabel.nifti1', 'Nifti1Image')])]) def test_unremoved_object(): with pytest.raises(AssertionError): test_object_removal() -@mock.patch(_sched("ATTRIBUTE"), [("3.0.0", [("nibabel.nifti1", "Nifti1Image", "affine")])]) +@mock.patch(_sched('ATTRIBUTE'), [('3.0.0', [('nibabel.nifti1', 'Nifti1Image', 'affine')])]) def test_unremoved_attr(): with pytest.raises(AssertionError): test_attribute_removal() diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index dfc53a2bdb..54ab79a928 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -1,4 +1,4 @@ -""" Test numerical errors introduced by writing then reading images +"""Test numerical errors introduced by writing then reading images Test arrays with a range of numerical values, integer and floating point. 
""" @@ -43,7 +43,7 @@ def check_params(in_arr, in_type, out_type): def big_bad_ulp(arr): - """ Return array of ulp values for values in `arr` + """Return array of ulp values for values in `arr` I haven't thought about whether the vectorized log2 here could lead to incorrect rounding; this only needs to be ballpark @@ -70,7 +70,7 @@ def big_bad_ulp(arr): nzs = working_arr > 0 fl2[nzs] = np.floor(np.log(working_arr[nzs]) / LOGe2) fl2 = np.clip(fl2, info['minexp'], np.inf) - return 2**(fl2 - info['nmant']) + return 2 ** (fl2 - info['nmant']) def test_big_bad_ulp(): @@ -80,8 +80,17 @@ def test_big_bad_ulp(): min_ulp = 2 ** (ti['minexp'] - ti['nmant']) in_arr = np.zeros((10,), dtype=ftype) in_arr = np.array([0, 0, 1, 2, 4, 5, -5, -np.inf, np.inf], dtype=ftype) - out_arr = [min_ulp, min_ulp, fi.eps, fi.eps * 2, fi.eps * 4, - fi.eps * 4, fi.eps * 4, np.inf, np.inf] + out_arr = [ + min_ulp, + min_ulp, + fi.eps, + fi.eps * 2, + fi.eps * 4, + fi.eps * 4, + fi.eps * 4, + np.inf, + np.inf, + ] assert_array_equal(big_bad_ulp(in_arr).astype(ftype), out_arr) @@ -158,8 +167,7 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): with np.errstate(over='ignore'): Ai = arr - scaling_type(inter) Ais = Ai / scaling_type(slope) - exp_abs_err = inting_err + inter_err + ( - big_bad_ulp(Ai) + big_bad_ulp(Ais)) + exp_abs_err = inting_err + inter_err + (big_bad_ulp(Ai) + big_bad_ulp(Ais)) # Relative scaling error from calculation of slope # This threshold needs to be 2 x larger on windows 32 bit and PPC for # some reason @@ -167,8 +175,8 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): test_vals = (abs_err <= exp_abs_err) | (rel_err <= rel_thresh) this_test = np.all(test_vals) if DEBUG: - abs_fails = (abs_err > exp_abs_err) - rel_fails = (rel_err > rel_thresh) + abs_fails = abs_err > exp_abs_err + rel_fails = rel_err > rel_thresh all_fails = abs_fails & rel_fails if np.any(rel_fails): abs_mx_e = abs_err[rel_fails].max() @@ -180,14 +188,19 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): rel_mx_e = rel_err[abs_fails].max() else: rel_mx_e = None - print((test_id, - np.dtype(in_type).str, - np.dtype(out_type).str, - exp_abs_mx_e, - abs_mx_e, - rel_thresh, - rel_mx_e, - slope, inter)) + print( + ( + test_id, + np.dtype(in_type).str, + np.dtype(out_type).str, + exp_abs_mx_e, + abs_mx_e, + rel_thresh, + rel_mx_e, + slope, + inter, + ) + ) # To help debugging failures with --pdb-failure np.nonzero(all_fails) assert this_test diff --git a/nibabel/tests/test_rstutils.py b/nibabel/tests/test_rstutils.py index 4fb83d3170..55a0aace7c 100644 --- a/nibabel/tests/test_rstutils.py +++ b/nibabel/tests/test_rstutils.py @@ -1,4 +1,4 @@ -""" Test printable table +"""Test printable table """ import numpy as np @@ -12,40 +12,47 @@ def test_rst_table(): # Tests for printable table function R, C = 3, 4 cell_values = np.arange(R * C).reshape((R, C)) - assert (rst_table(cell_values) == - """+--------+--------+--------+--------+--------+ + assert ( + rst_table(cell_values) + == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+--------+--------+--------+--------+""") - assert (rst_table(cell_values, ['a', 'b', 'c']) == - """+---+--------+--------+--------+--------+ ++--------+--------+--------+--------+--------+""" + ) + assert ( + rst_table(cell_values, ['a', 'b', 'c']) + == 
"""+---+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +===+========+========+========+========+ | a | 0.00 | 1.00 | 2.00 | 3.00 | | b | 4.00 | 5.00 | 6.00 | 7.00 | | c | 8.00 | 9.00 | 10.00 | 11.00 | -+---+--------+--------+--------+--------+""") ++---+--------+--------+--------+--------+""" + ) with pytest.raises(ValueError): rst_table(cell_values, ['a', 'b']) with pytest.raises(ValueError): rst_table(cell_values, ['a', 'b', 'c', 'd']) - assert (rst_table(cell_values, None, ['1', '2', '3', '4']) == - """+--------+-------+-------+-------+-------+ + assert ( + rst_table(cell_values, None, ['1', '2', '3', '4']) + == """+--------+-------+-------+-------+-------+ | | 1 | 2 | 3 | 4 | +========+=======+=======+=======+=======+ | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+-------+-------+-------+-------+""") ++--------+-------+-------+-------+-------+""" + ) with pytest.raises(ValueError): rst_table(cell_values, None, ['1', '2', '3']) with pytest.raises(ValueError): rst_table(cell_values, None, list('12345')) - assert (rst_table(cell_values, title='A title') == - """******* + assert ( + rst_table(cell_values, title='A title') + == """******* A title ******* @@ -55,35 +62,36 @@ def test_rst_table(): | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+--------+--------+--------+--------+""") - assert (rst_table(cell_values, val_fmt='{0}') == - """+--------+--------+--------+--------+--------+ ++--------+--------+--------+--------+--------+""" + ) + assert ( + rst_table(cell_values, val_fmt='{0}') + == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0 | 1 | 2 | 3 | | row[1] | 4 | 5 | 6 | 7 | | row[2] | 8 | 9 | 10 | 11 | -+--------+--------+--------+--------+--------+""") ++--------+--------+--------+--------+--------+""" + ) # Doing a fancy cell format cell_values_back = np.arange(R * C)[::-1].reshape((R, C)) cell_3d = np.dstack((cell_values, cell_values_back)) - assert (rst_table(cell_3d, val_fmt='{0[0]}-{0[1]}') == - """+--------+--------+--------+--------+--------+ + assert ( + rst_table(cell_3d, val_fmt='{0[0]}-{0[1]}') + == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0-11 | 1-10 | 2-9 | 3-8 | | row[1] | 4-7 | 5-6 | 6-5 | 7-4 | | row[2] | 8-3 | 9-2 | 10-1 | 11-0 | -+--------+--------+--------+--------+--------+""") ++--------+--------+--------+--------+--------+""" + ) # Test formatting characters - formats = dict( - down='!', - along='_', - thick_long='~', - cross='%', - title_heading='#') - assert (rst_table(cell_values, title='A title', format_chars=formats) == - """####### + formats = dict(down='!', along='_', thick_long='~', cross='%', title_heading='#') + assert ( + rst_table(cell_values, title='A title', format_chars=formats) + == """####### A title ####### @@ -93,7 +101,8 @@ def test_rst_table(): ! row[0] ! 0.00 ! 1.00 ! 2.00 ! 3.00 ! ! row[1] ! 4.00 ! 5.00 ! 6.00 ! 7.00 ! ! row[2] ! 8.00 ! 9.00 ! 10.00 ! 11.00 ! -%________%________%________%________%________%""") +%________%________%________%________%________%""" + ) formats['funny_value'] = '!' 
with pytest.raises(ValueError): rst_table(cell_values, title='A title', format_chars=formats) diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index b1a00c0570..e705a96c83 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test for scaling / rounding in volumeutils module """ +"""Test for scaling / rounding in volumeutils module""" import numpy as np import warnings @@ -18,7 +18,7 @@ from .test_volumeutils import _calculate_scale -from numpy.testing import (assert_array_almost_equal, assert_array_equal) +from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest @@ -26,30 +26,33 @@ DEBUG = True -@pytest.mark.parametrize("in_arr, res", [ - ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), - (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), - ([[np.nan], [np.nan]], (np.inf, -np.inf)), # all nans slices - (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)), - ([[-np.inf], [np.inf]], (np.inf, -np.inf)), # all infs slices - (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)), - ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), - ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), - ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case - ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), - ([np.nan], (np.inf, -np.inf)), - ([np.inf], (np.inf, -np.inf)), - ([-np.inf], (np.inf, -np.inf)), - ([np.inf, 1], (1, 1)), # only look at finite values - ([-np.inf, 1], (1, 1)), - ([[], []], (np.inf, -np.inf)), # empty array - (np.array([[-3, 0, 1], [2, -1, 4]], dtype=int), (-3, 4)), - (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), - ([0., 1, 2, 3], (0, 3)), - # Complex comparison works as if they are floats - ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), - ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), -]) +@pytest.mark.parametrize( + 'in_arr, res', + [ + ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), + (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), + ([[np.nan], [np.nan]], (np.inf, -np.inf)), # all nans slices + (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)), + ([[-np.inf], [np.inf]], (np.inf, -np.inf)), # all infs slices + (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)), + ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), + ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), + ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case + ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), + ([np.nan], (np.inf, -np.inf)), + ([np.inf], (np.inf, -np.inf)), + ([-np.inf], (np.inf, -np.inf)), + ([np.inf, 1], (1, 1)), # only look at finite values + ([-np.inf, 1], (1, 1)), + ([[], []], (np.inf, -np.inf)), # empty array + (np.array([[-3, 0, 1], [2, -1, 4]], dtype=int), (-3, 4)), + (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), + ([0.0, 1, 2, 3], (0, 3)), + # Complex comparison works as if they are floats + ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), + ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), + ], +) def test_finite_range(in_arr, res): # Finite range utility function assert finite_range(in_arr) == res @@ -71,12 +74,12 @@ def test_finite_range(in_arr, res): def test_finite_range_err(): # Test error cases - a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')]) + a = np.array([[1.0, 0, 1], [2, 3, 4]]).view([('f1', 'f')]) with 
pytest.raises(TypeError): finite_range(a) -@pytest.mark.parametrize("out_type", [np.int16, np.float32]) +@pytest.mark.parametrize('out_type', [np.int16, np.float32]) def test_a2f_mn_mx(out_type): # Test array to file mn, mx handling str_io = BytesIO() @@ -111,7 +114,7 @@ def test_a2f_mn_mx(out_type): def test_a2f_nan2zero(): # Test conditions under which nans written to zero - arr = np.array([np.nan, 99.], dtype=np.float32) + arr = np.array([np.nan, 99.0], dtype=np.float32) str_io = BytesIO() array_to_file(arr, str_io) data_back = array_from_file(arr.shape, np.float32, str_io) @@ -132,14 +135,17 @@ def test_a2f_nan2zero(): assert_array_equal(data_back, [np.array(np.nan).astype(np.int32), 99]) -@pytest.mark.parametrize("in_type, out_type", [ - (np.int16, np.int16), - (np.int16, np.int8), - (np.uint16, np.uint8), - (np.int32, np.int8), - (np.float32, np.uint8), - (np.float32, np.int16) -]) +@pytest.mark.parametrize( + 'in_type, out_type', + [ + (np.int16, np.int16), + (np.int16, np.int8), + (np.uint16, np.uint8), + (np.int32, np.int8), + (np.float32, np.uint8), + (np.float32, np.int16), + ], +) def test_array_file_scales(in_type, out_type): # Test scaling works for max, min when going from larger to smaller type, # and from float to integer. @@ -154,21 +160,24 @@ def test_array_file_scales(in_type, out_type): arr2 = array_from_file(arr.shape, out_dtype, bio) arr3 = apply_read_scaling(arr2, slope, inter) # Max rounding error for integer type - max_miss = slope / 2. + max_miss = slope / 2.0 assert np.all(np.abs(arr - arr3) <= max_miss) -@pytest.mark.parametrize("category0, category1, overflow",[ - # Confirm that, for all ints and uints as input, and all possible outputs, - # for any simple way of doing the calculation, the result is near enough - ('int', 'int', False), - ('uint', 'int', False), - # Converting floats to integer - ('float', 'int', True), - ('float', 'uint', True), - ('complex', 'int', True), - ('complex', 'uint', True), -]) +@pytest.mark.parametrize( + 'category0, category1, overflow', + [ + # Confirm that, for all ints and uints as input, and all possible outputs, + # for any simple way of doing the calculation, the result is near enough + ('int', 'int', False), + ('uint', 'int', False), + # Converting floats to integer + ('float', 'int', True), + ('float', 'uint', True), + ('complex', 'int', True), + ('complex', 'uint', True), + ], +) def test_scaling_in_abstract(category0, category1, overflow): for in_type in np.sctypes[category0]: for out_type in np.sctypes[category1]: diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 61a41f54ad..e4006788c1 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -1,6 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Test scripts +"""Test scripts Test running scripts """ @@ -8,8 +8,7 @@ import sys import os import shutil -from os.path import (dirname, join as pjoin, abspath, splitext, basename, - exists) +from os.path import dirname, join as pjoin, abspath, splitext, basename, exists import csv from glob import glob @@ -27,8 +26,7 @@ from .scriptrunner import ScriptRunner from .nibabel_data import needs_nibabel_data from ..testing import assert_dt_equal, assert_re_in -from .test_parrec import (DTI_PAR_BVECS, DTI_PAR_BVALS, - EXAMPLE_IMAGES as PARREC_EXAMPLES) +from .test_parrec import DTI_PAR_BVECS, DTI_PAR_BVALS, EXAMPLE_IMAGES as PARREC_EXAMPLES from .test_parrec_data import BALLS, AFF_OFF from ..testing 
import assert_data_similar @@ -39,9 +37,8 @@ def _proc_stdout(stdout): runner = ScriptRunner( - script_sdir='bin', - debug_print_var='NIPY_DEBUG_PRINT', - output_processor=_proc_stdout) + script_sdir='bin', debug_print_var='NIPY_DEBUG_PRINT', output_processor=_proc_stdout +) run_command = runner.run_command @@ -49,6 +46,8 @@ def script_test(func): # Decorator to label test as a script_test func.script_test = True return func + + script_test.__test__ = False # It's not a test DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) @@ -62,59 +61,91 @@ def load_small_file(): return False -def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): +def check_nib_ls_example4d(opts=[], hdrs_str='', other_str=''): # test nib-ls script fname = pjoin(DATA_PATH, 'example4d.nii.gz') - expected_re = (" (int16|[<>]i2) \\[128, 96, 24, 2\\] 2.00x2.00x2.20x2000.00 " - f"#exts: 2{hdrs_str} sform{other_str}$") + expected_re = ( + ' (int16|[<>]i2) \\[128, 96, 24, 2\\] 2.00x2.00x2.20x2000.00 ' + f'#exts: 2{hdrs_str} sform{other_str}$' + ) cmd = ['nib-ls'] + opts + [fname] code, stdout, stderr = run_command(cmd) - assert fname == stdout[:len(fname)] - assert_re_in(expected_re, stdout[len(fname):]) + assert fname == stdout[: len(fname)] + assert_re_in(expected_re, stdout[len(fname) :]) def check_nib_diff_examples(): - fnames = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] + fnames = [pjoin(DATA_PATH, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] code, stdout, stderr = run_command(['nib-diff'] + fnames, check_code=False) - checked_fields = ["Field/File", "regular", "dim_info", "dim", "datatype", "bitpix", "pixdim", "slice_end", - "xyzt_units", "cal_max", "descrip", "qform_code", "sform_code", "quatern_b", - "quatern_c", "quatern_d", "qoffset_x", "qoffset_y", "qoffset_z", "srow_x", - "srow_y", "srow_z", "DATA(md5)", "DATA(diff 1:)"] + checked_fields = [ + 'Field/File', + 'regular', + 'dim_info', + 'dim', + 'datatype', + 'bitpix', + 'pixdim', + 'slice_end', + 'xyzt_units', + 'cal_max', + 'descrip', + 'qform_code', + 'sform_code', + 'quatern_b', + 'quatern_c', + 'quatern_d', + 'qoffset_x', + 'qoffset_y', + 'qoffset_z', + 'srow_x', + 'srow_y', + 'srow_z', + 'DATA(md5)', + 'DATA(diff 1:)', + ] for item in checked_fields: assert item in stdout - fnames2 = [pjoin(DATA_PATH, f) - for f in ('example4d.nii.gz', 'example4d.nii.gz')] + fnames2 = [pjoin(DATA_PATH, f) for f in ('example4d.nii.gz', 'example4d.nii.gz')] code, stdout, stderr = run_command(['nib-diff'] + fnames2, check_code=False) - assert stdout == "These files are identical." + assert stdout == 'These files are identical.' - fnames3 = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'example4d.nii.gz', 'example_nifti2.nii.gz')] + fnames3 = [ + pjoin(DATA_PATH, f) + for f in ('standard.nii.gz', 'example4d.nii.gz', 'example_nifti2.nii.gz') + ] code, stdout, stderr = run_command(['nib-diff'] + fnames3, check_code=False) for item in checked_fields: assert item in stdout - fnames4 = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'standard.nii.gz', 'standard.nii.gz')] + fnames4 = [ + pjoin(DATA_PATH, f) for f in ('standard.nii.gz', 'standard.nii.gz', 'standard.nii.gz') + ] code, stdout, stderr = run_command(['nib-diff'] + fnames4, check_code=False) - assert stdout == "These files are identical." + assert stdout == 'These files are identical.' 
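+    # The --dt flag below selects the numpy dtype (float64 here) used when
+    # computing the voxelwise data difference; the reported fields should
+    # be unchanged by that choice.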
code, stdout, stderr = run_command(['nib-diff', '--dt', 'float64'] + fnames, check_code=False) for item in checked_fields: assert item in stdout -@pytest.mark.parametrize("args", [ - [], - [['-H', 'dim,bitpix'], r" \[ 4 128 96 24 2 1 1 1\] 16"], - [['-c'], "", " !1030 uniques. Use --all-counts"], - [['-c', '--all-counts'], "", " 2:3 3:2 4:1 5:1.*"], - # both stats and counts - [['-c', '-s', '--all-counts'], "", r" \[229725\] \[2, 1.2e\+03\] 2:3 3:2 4:1 5:1.*"], - # and must not error out if we allow for zeros - [['-c', '-s', '-z', '--all-counts'], "", r" \[589824\] \[0, 1.2e\+03\] 0:360099 2:3 3:2 4:1 5:1.*"], -]) +@pytest.mark.parametrize( + 'args', + [ + [], + [['-H', 'dim,bitpix'], r' \[ 4 128 96 24 2 1 1 1\] 16'], + [['-c'], '', ' !1030 uniques. Use --all-counts'], + [['-c', '--all-counts'], '', ' 2:3 3:2 4:1 5:1.*'], + # both stats and counts + [['-c', '-s', '--all-counts'], '', r' \[229725\] \[2, 1.2e\+03\] 2:3 3:2 4:1 5:1.*'], + # and must not error out if we allow for zeros + [ + ['-c', '-s', '-z', '--all-counts'], + '', + r' \[589824\] \[0, 1.2e\+03\] 0:360099 2:3 3:2 4:1 5:1.*', + ], + ], +) @script_test def test_nib_ls(args): check_nib_ls_example4d(*args) @@ -126,8 +157,7 @@ def test_nib_ls_multiple(): # verify that correctly lists/formats for multiple files fnames = [ pjoin(DATA_PATH, f) - for f in ('example4d.nii.gz', 'example_nifti2.nii.gz', - 'small.mnc', 'nifti2.hdr') + for f in ('example4d.nii.gz', 'example_nifti2.nii.gz', 'small.mnc', 'nifti2.hdr') ] code, stdout, stderr = run_command(['nib-ls'] + fnames) stdout_lines = stdout.split('\n') @@ -136,30 +166,27 @@ def test_nib_ls_multiple(): # they should be indented correctly. Since all files are int type - ln = max(len(f) for f in fnames) i_str = ' i' if sys.byteorder == 'little' else ' -ve offset - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]), + ( + (2, 3, 4), + np.diag([-1, 1, 1, 1]), + None, + (2, 3, 4), + [ + [1, 0, 0, -1], # axis reversed -> -ve offset + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], + ), # zooms for affine > 1 -> larger grid with default 1mm output voxels - ((2, 3, 4), np.diag([4, 5, 6, 1]), None, - (5, 11, 19), np.eye(4)), + ((2, 3, 4), np.diag([4, 5, 6, 1]), None, (5, 11, 19), np.eye(4)), # set output voxels to be same size as input. 
back to original shape - ((2, 3, 4), np.diag([4, 5, 6, 1]), (4, 5, 6), - (2, 3, 4), np.diag([4, 5, 6, 1])), + ((2, 3, 4), np.diag([4, 5, 6, 1]), (4, 5, 6), (2, 3, 4), np.diag([4, 5, 6, 1])), # Translation preserved in output - ((2, 3, 4), trans_123, None, - (2, 3, 4), trans_123), - ((2, 3, 4), trans_m123, None, - (2, 3, 4), trans_m123), + ((2, 3, 4), trans_123, None, (2, 3, 4), trans_123), + ((2, 3, 4), trans_m123, None, (2, 3, 4), trans_m123), # rotation around 3rd axis - ((2, 3, 4), rot_3, None, - # x diff, y diff now 3 cos pi / 4 == 2.12, ceil to 3, add 1 - # most negative x now 2 cos pi / 4 - (4, 4, 4), [[1, 0, 0, -2 * np.cos(np.pi / 4)], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]), + ( + (2, 3, 4), + rot_3, + None, + # x diff, y diff now 3 cos pi / 4 == 2.12, ceil to 3, add 1 + # most negative x now 2 cos pi / 4 + (4, 4, 4), + [[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + ), # Less than 3 axes - ((2, 3), np.eye(4), None, - (2, 3), np.eye(4)), - ((2,), np.eye(4), None, - (2,), np.eye(4)), + ((2, 3), np.eye(4), None, (2, 3), np.eye(4)), + ((2,), np.eye(4), None, (2,), np.eye(4)), # Number of voxel sizes matches length - ((2, 3), np.diag([4, 5, 6, 1]), (4, 5), - (2, 3), np.diag([4, 5, 1, 1])), + ((2, 3), np.diag([4, 5, 6, 1]), (4, 5), (2, 3), np.diag([4, 5, 1, 1])), ) @@ -105,21 +107,21 @@ def test_vox2out_vox(): def test_slice2volume(): # Get affine expressing selection of single slice from volume - for axis, def_aff in zip((0, 1, 2), ( + for axis, def_aff in zip( + (0, 1, 2), + ( [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1]], - [[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]])): + [[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]], + ), + ): for val in (0, 5, 10): exp_aff = np.array(def_aff) exp_aff[axis, -1] = val assert (slice2volume(val, axis) == exp_aff).all() -@pytest.mark.parametrize("index, axis", [ - [-1, 0], - [0, -1], - [0, 3] -]) +@pytest.mark.parametrize('index, axis', [[-1, 0], [0, -1], [0, 3]]) def test_slice2volume_exception(index, axis): with pytest.raises(ValueError): - slice2volume(index, axis) \ No newline at end of file + slice2volume(index, axis) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 52eff4be72..cdbe8dc9f2 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing spatialimages - +"""Testing spatialimages """ import warnings @@ -33,6 +32,7 @@ from ..tmpdirs import InTemporaryDirectory from .. 
import load as top_load + def test_header_init(): # test the basic header hdr = SpatialHeader() @@ -70,12 +70,15 @@ def test_from_header(): assert hdr is not copy class C: + def get_data_dtype(self): + return np.dtype('u2') - def get_data_dtype(self): return np.dtype('u2') + def get_data_shape(self): + return (5, 4, 3) - def get_data_shape(self): return (5, 4, 3) + def get_zooms(self): + return (10.0, 9.0, 8.0) - def get_zooms(self): return (10.0, 9.0, 8.0) converted = SpatialHeader.from_header(C()) assert isinstance(converted, SpatialHeader) assert converted.get_data_dtype() == np.dtype('u2') @@ -151,23 +154,20 @@ def test_data_dtype(): def test_affine(): hdr = SpatialHeader(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) - assert_array_almost_equal(hdr.get_best_affine(), - [[-3.0, 0, 0, 0], - [0, 2, 0, -1], - [0, 0, 1, -1], - [0, 0, 0, 1]]) + assert_array_almost_equal( + hdr.get_best_affine(), [[-3.0, 0, 0, 0], [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]] + ) hdr.default_x_flip = False - assert_array_almost_equal(hdr.get_best_affine(), - [[3.0, 0, 0, 0], - [0, 2, 0, -1], - [0, 0, 1, -1], - [0, 0, 0, 1]]) + assert_array_almost_equal( + hdr.get_best_affine(), [[3.0, 0, 0, 0], [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]] + ) assert np.array_equal(hdr.get_base_affine(), hdr.get_best_affine()) def test_read_data(): class CHeader(SpatialHeader): data_layout = 'C' + for klass, order in ((SpatialHeader, 'F'), (CHeader, 'C')): hdr = klass(np.int32, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) fobj = BytesIO() @@ -359,7 +359,7 @@ def test_get_fdata(self): assert rt_img.get_fdata() is not out_data assert (rt_img.get_fdata() == in_data).all() - @expires("5.0.0") + @expires('5.0.0') def test_get_data(self): # Test array image and proxy image interface img_klass = self.image_class @@ -399,8 +399,7 @@ def test_slicer(self): in_data_template = np.arange(240, dtype=np.int16) base_affine = np.eye(4) t_axis = None - for dshape in ((4, 5, 6, 2), # Time series - (8, 5, 6)): # Volume + for dshape in ((4, 5, 6, 2), (8, 5, 6)): # Time series # Volume in_data = in_data_template.copy().reshape(dshape) img = img_klass(in_data, base_affine.copy()) @@ -408,11 +407,12 @@ def test_slicer(self): with pytest.raises(TypeError) as exception_manager: img[0, 0, 0] # Make sure the right message gets raised: - assert (str(exception_manager.value) == - "Cannot slice image objects; consider using " - "`img.slicer[slice]` to generate a sliced image (see " - "documentation for caveats) or slicing image array data " - "with `img.dataobj[slice]` or `img.get_fdata()[slice]`") + assert ( + str(exception_manager.value) == 'Cannot slice image objects; consider using ' + '`img.slicer[slice]` to generate a sliced image (see ' + 'documentation for caveats) or slicing image array data ' + 'with `img.dataobj[slice]` or `img.get_fdata()[slice]`' + ) if not spatial_axes_first(img): with pytest.raises(ValueError): @@ -425,14 +425,15 @@ def test_slicer(self): spatial_zooms = img.header.get_zooms()[:3] # Down-sample with [::2, ::2, ::2] along spatial dimensions - sliceobj = [slice(None, None, 2)] * 3 + \ - [slice(None)] * (len(dshape) - 3) + sliceobj = [slice(None, None, 2)] * 3 + [slice(None)] * (len(dshape) - 3) downsampled_img = img.slicer[tuple(sliceobj)] assert (downsampled_img.header.get_zooms()[:3] == np.array(spatial_zooms) * 2).all() - max4d = (hasattr(img.header, '_structarr') and - 'dims' in img.header._structarr.dtype.fields and - img.header._structarr['dims'].shape == (4,)) + max4d = ( + hasattr(img.header, '_structarr') + and 'dims' 
in img.header._structarr.dtype.fields + and img.header._structarr['dims'].shape == (4,) + ) # Check newaxis and single-slice errors with pytest.raises(IndexError): img.slicer[None] @@ -453,8 +454,7 @@ def test_slicer(self): img.slicer[:, :, :, None] else: # Reorder non-spatial axes - assert (img.slicer[:, :, :, None].shape - == img.shape[:3] + (1,) + img.shape[3:]) + assert img.slicer[:, :, :, None].shape == img.shape[:3] + (1,) + img.shape[3:] # 4D to 3D using ellipsis or slices assert img.slicer[..., 0].shape == img.shape[:-1] assert img.slicer[:, :, :, 0].shape == img.shape[:-1] @@ -510,8 +510,23 @@ def test_slicer(self): img.slicer[[0], [-1]] # Check data is consistent with slicing numpy arrays - slice_elems = np.array((None, Ellipsis, 0, 1, -1, [0], [1], [-1], - slice(None), slice(1), slice(-1), slice(1, -1)), dtype=object) + slice_elems = np.array( + ( + None, + Ellipsis, + 0, + 1, + -1, + [0], + [1], + [-1], + slice(None), + slice(1), + slice(-1), + slice(1, -1), + ), + dtype=object, + ) for n_elems in range(6): for _ in range(1 if n_elems == 0 else 10): sliceobj = tuple(np.random.choice(slice_elems, n_elems)) @@ -529,12 +544,13 @@ def test_slicer(self): class MmapImageMixin: - """ Mixin for testing images that may return memory maps """ + """Mixin for testing images that may return memory maps""" + #: whether to test mode of returned memory map check_mmap_mode = True def get_disk_image(self): - """ Return image, image filename, and flag for required scaling + """Return image, image filename, and flag for required scaling Subclasses can do anything to return an image, including loading a pre-existing image from disk. @@ -563,19 +579,22 @@ def test_load_mmap(self): with InTemporaryDirectory(): img, fname, has_scaling = self.get_disk_image() file_map = img.file_map.copy() - for func, param1 in ((img_klass.from_filename, fname), - (img_klass.load, fname), - (top_load, fname), - (img_klass.from_file_map, file_map)): + for func, param1 in ( + (img_klass.from_filename, fname), + (img_klass.load, fname), + (top_load, fname), + (img_klass.from_file_map, file_map), + ): for mmap, expected_mode in ( - # mmap value, expected memmap mode - # mmap=None -> no mmap value - # expected mode=None -> no memmap returned - (None, 'c'), - (True, 'c'), - ('c', 'c'), - ('r', 'r'), - (False, None)): + # mmap value, expected memmap mode + # mmap=None -> no mmap value + # expected mode=None -> no memmap returned + (None, 'c'), + (True, 'c'), + ('c', 'c'), + ('r', 'r'), + (False, None), + ): # If the image has scaling, then numpy 1.12 will not return # a memmap, regardless of the input flags. Previous # numpies returned a memmap object, even though the array @@ -589,7 +608,9 @@ def test_load_mmap(self): back_img = func(param1, **kwargs) back_data = np.asanyarray(back_img.dataobj) if expected_mode is None: - assert not isinstance(back_data, np.memmap), f'Should not be a {img_klass.__name__}' + assert not isinstance( + back_data, np.memmap + ), f'Should not be a {img_klass.__name__}' else: assert isinstance(back_data, np.memmap), f'Not a {img_klass.__name__}' if self.check_mmap_mode: diff --git a/nibabel/tests/test_spm2analyze.py b/nibabel/tests/test_spm2analyze.py index 582f6b70bd..9881a23d07 100644 --- a/nibabel/tests/test_spm2analyze.py +++ b/nibabel/tests/test_spm2analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for SPM2 header stuff """ +"""Tests for SPM2 header stuff""" import numpy as np @@ -19,6 +19,7 @@ from . import test_spm99analyze + class TestSpm2AnalyzeHeader(test_spm99analyze.TestSpm99AnalyzeHeader): header_class = Spm2AnalyzeHeader @@ -26,20 +27,21 @@ def test_slope_inter(self): hdr = self.header_class() assert hdr.get_slope_inter() == (1.0, 0.0) for in_tup, exp_err, out_tup, raw_slope in ( - ((2.0,), None, (2.0, 0.), 2.), - ((None,), None, (None, None), np.nan), - ((1.0, None), None, (1.0, 0.), 1.), - # non zero intercept causes error - ((None, 1.1), HeaderTypeError, (None, None), np.nan), - ((2.0, 1.1), HeaderTypeError, (None, None), 2.), - # null scalings - ((0.0, None), HeaderDataError, (None, None), 0.), - ((np.nan, np.nan), None, (None, None), np.nan), - ((np.nan, None), None, (None, None), np.nan), - ((None, np.nan), None, (None, None), np.nan), - ((np.inf, None), HeaderDataError, (None, None), np.inf), - ((-np.inf, None), HeaderDataError, (None, None), -np.inf), - ((None, 0.0), None, (None, None), np.nan)): + ((2.0,), None, (2.0, 0.0), 2.0), + ((None,), None, (None, None), np.nan), + ((1.0, None), None, (1.0, 0.0), 1.0), + # non zero intercept causes error + ((None, 1.1), HeaderTypeError, (None, None), np.nan), + ((2.0, 1.1), HeaderTypeError, (None, None), 2.0), + # null scalings + ((0.0, None), HeaderDataError, (None, None), 0.0), + ((np.nan, np.nan), None, (None, None), np.nan), + ((np.nan, None), None, (None, None), np.nan), + ((None, np.nan), None, (None, None), np.nan), + ((np.inf, None), HeaderDataError, (None, None), np.inf), + ((-np.inf, None), HeaderDataError, (None, None), -np.inf), + ((None, 0.0), None, (None, None), np.nan), + ): hdr = self.header_class() if not exp_err is None: with pytest.raises(exp_err): diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index d2ba898fa6..9d04643d2a 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -17,14 +17,14 @@ import pytest from ..optpkg import optional_package + _, have_scipy, _ = optional_package('scipy') # Decorator to skip tests requiring save / load if scipy not available for mat # files needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available') -from ..spm99analyze import (Spm99AnalyzeHeader, Spm99AnalyzeImage, - HeaderTypeError) +from ..spm99analyze import Spm99AnalyzeHeader, Spm99AnalyzeImage, HeaderTypeError from ..casting import type_info, shared_range from ..volumeutils import apply_read_scaling, _dt_min_max from ..spatialimages import supported_np_types, HeaderDataError @@ -33,7 +33,7 @@ bytesio_round_trip, bytesio_filemap, assert_allclose_safely, - suppress_warnings + suppress_warnings, ) from . 
import test_analyze @@ -48,7 +48,7 @@ class HeaderScalingMixin: - """ Mixin to add scaling tests to header tests + """Mixin to add scaling tests to header tests Needs to be a mixin so nifti tests can use this method without inheriting directly from the SPM header tests @@ -82,8 +82,7 @@ def test_data_scaling(self): assert np.all(data == data_back) -class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, - HeaderScalingMixin): +class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, HeaderScalingMixin): header_class = Spm99AnalyzeHeader def test_empty(self): @@ -109,20 +108,21 @@ def test_slope_inter(self): hdr = self.header_class() assert hdr.get_slope_inter() == (1.0, None) for in_tup, exp_err, out_tup, raw_slope in ( - ((2.0,), None, (2.0, None), 2.), - ((None,), None, (None, None), np.nan), - ((1.0, None), None, (1.0, None), 1.), - # non zero intercept causes error - ((None, 1.1), HeaderTypeError, (None, None), np.nan), - ((2.0, 1.1), HeaderTypeError, (None, None), 2.), - # null scalings - ((0.0, None), HeaderDataError, (None, None), 0.), - ((np.nan, np.nan), None, (None, None), np.nan), - ((np.nan, None), None, (None, None), np.nan), - ((None, np.nan), None, (None, None), np.nan), - ((np.inf, None), HeaderDataError, (None, None), np.inf), - ((-np.inf, None), HeaderDataError, (None, None), -np.inf), - ((None, 0.0), None, (None, None), np.nan)): + ((2.0,), None, (2.0, None), 2.0), + ((None,), None, (None, None), np.nan), + ((1.0, None), None, (1.0, None), 1.0), + # non zero intercept causes error + ((None, 1.1), HeaderTypeError, (None, None), np.nan), + ((2.0, 1.1), HeaderTypeError, (None, None), 2.0), + # null scalings + ((0.0, None), HeaderDataError, (None, None), 0.0), + ((np.nan, np.nan), None, (None, None), np.nan), + ((np.nan, None), None, (None, None), np.nan), + ((None, np.nan), None, (None, None), np.nan), + ((np.inf, None), HeaderDataError, (None, None), np.inf), + ((-np.inf, None), HeaderDataError, (None, None), -np.inf), + ((None, 0.0), None, (None, None), np.nan), + ): hdr = self.header_class() if not exp_err is None: with pytest.raises(exp_err): @@ -146,9 +146,11 @@ def test_origin_checks(self): hdr['origin'][0] = 101 # severity 20 fhdr, message, raiser = self.log_chk(hdr, 20) assert fhdr == hdr - assert (message == 'very large origin values ' - 'relative to dims; leaving as set, ' - 'ignoring for affine') + assert ( + message == 'very large origin values ' + 'relative to dims; leaving as set, ' + 'ignoring for affine' + ) pytest.raises(*raiser) # diagnose binary block dxer = self.header_class.diagnose_binaryblock @@ -229,21 +231,13 @@ def test_header_scaling(self): if not hdr_class.has_data_intercept: return invalid_inters = (np.nan, np.inf, -np.inf) - invalid_pairs = tuple( - itertools.product(invalid_slopes, invalid_inters)) - bad_slopes_good_inter = tuple( - itertools.product(invalid_slopes, (0, 1))) - good_slope_bad_inters = tuple( - itertools.product((1, 2), invalid_inters)) - for slope, inter in (invalid_pairs + bad_slopes_good_inter + - good_slope_bad_inters): + invalid_pairs = tuple(itertools.product(invalid_slopes, invalid_inters)) + bad_slopes_good_inter = tuple(itertools.product(invalid_slopes, (0, 1))) + good_slope_bad_inters = tuple(itertools.product((1, 2), invalid_inters)) + for slope, inter in invalid_pairs + bad_slopes_good_inter + good_slope_bad_inters: self.assert_null_scaling(arr, slope, inter) - def _check_write_scaling(self, - slope, - inter, - effective_slope, - effective_inter): + def _check_write_scaling(self, slope, inter, 
effective_slope, effective_inter): # Test that explicit set of slope / inter forces write of data using # this slope, inter. We use this helper function for children of the # Analyze header @@ -275,16 +269,13 @@ def _check_write_scaling(self, assert_array_equal(img.get_fdata(), arr) # But the array scaled after round trip img_rt = bytesio_round_trip(img) - assert_array_equal(img_rt.get_fdata(), - apply_read_scaling(arr, - effective_slope, - effective_inter)) + assert_array_equal( + img_rt.get_fdata(), apply_read_scaling(arr, effective_slope, effective_inter) + ) # The scaling set into the array proxy do_slope, do_inter = img.header.get_slope_inter() - assert_array_equal(img_rt.dataobj.slope, - 1 if do_slope is None else do_slope) - assert_array_equal(img_rt.dataobj.inter, - 0 if do_inter is None else do_inter) + assert_array_equal(img_rt.dataobj.slope, 1 if do_slope is None else do_slope) + assert_array_equal(img_rt.dataobj.inter, 0 if do_inter is None else do_inter) # The new header scaling has been reset self.assert_scale_me_scaling(img_rt.header) # But the original is the same as it was when we set it @@ -293,20 +284,19 @@ def _check_write_scaling(self, img.header.set_data_dtype(np.uint8) with np.errstate(invalid='ignore'): img_rt = bytesio_round_trip(img) - assert_array_equal(img_rt.get_fdata(), - apply_read_scaling(np.round(arr), - effective_slope, - effective_inter)) + assert_array_equal( + img_rt.get_fdata(), apply_read_scaling(np.round(arr), effective_slope, effective_inter) + ) # But we have to clip too arr[-1, -1, -1] = 256 arr[-2, -1, -1] = -1 with np.errstate(invalid='ignore'): img_rt = bytesio_round_trip(img) exp_unscaled_arr = np.clip(np.round(arr), 0, 255) - assert_array_equal(img_rt.get_fdata(), - apply_read_scaling(exp_unscaled_arr, - effective_slope, - effective_inter)) + assert_array_equal( + img_rt.get_fdata(), + apply_read_scaling(exp_unscaled_arr, effective_slope, effective_inter), + ) def test_int_int_scaling(self): # Check int to int conversion without slope, inter @@ -328,9 +318,7 @@ def test_no_scaling(self): # Any old non-default slope and intercept slope = 2 inter = 10 if hdr.has_data_intercept else 0 - for in_dtype, out_dtype in itertools.product( - FLOAT_TYPES + IUINT_TYPES, - supported_types): + for in_dtype, out_dtype in itertools.product(FLOAT_TYPES + IUINT_TYPES, supported_types): # Need to check complex scaling mn_in, mx_in = _dt_min_max(in_dtype) arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype) @@ -354,8 +342,7 @@ def test_no_scaling(self): exp_back = np.round(exp_back) if in_dtype in FLOAT_TYPES: # Clip to shared range of working precision - exp_back = np.clip(exp_back, - *shared_range(float, out_dtype)) + exp_back = np.clip(exp_back, *shared_range(float, out_dtype)) else: # iu input and output type # No scaling, never gets converted to float. # Does get clipped to range of output type @@ -363,9 +350,7 @@ def test_no_scaling(self): if (mn_in, mx_in) != (mn_out, mx_out): # Use smaller of input, output range to avoid np.clip # upcasting the array because of large clip limits. 
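                 # (e.g. int64-sized limits applied to an int8 array would
                 # promote the clipped result to a wider dtype)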
- exp_back = np.clip(exp_back, - max(mn_in, mn_out), - min(mx_in, mx_out)) + exp_back = np.clip(exp_back, max(mn_in, mn_out), min(mx_in, mx_out)) if out_dtype in COMPLEX_TYPES: # always cast to real from complex exp_back = exp_back.astype(out_dtype) @@ -374,8 +359,7 @@ def test_no_scaling(self): exp_back = exp_back.astype(float) # Allow for small differences in large numbers with suppress_warnings(): # invalid value - assert_allclose_safely(back_arr, - exp_back * slope + inter) + assert_allclose_safely(back_arr, exp_back * slope + inter) def test_write_scaling(self): # Check writes with scaling set @@ -414,7 +398,9 @@ class TestSpm99AnalyzeImage(test_analyze.TestAnalyzeImage, ImageScalingMixin): test_header_updating = needs_scipy(test_analyze.TestAnalyzeImage.test_header_updating) test_offset_to_zero = needs_scipy(test_analyze.TestAnalyzeImage.test_offset_to_zero) test_big_offset_exts = needs_scipy(test_analyze.TestAnalyzeImage.test_big_offset_exts) - test_dtype_to_filename_arg = needs_scipy(test_analyze.TestAnalyzeImage.test_dtype_to_filename_arg) + test_dtype_to_filename_arg = needs_scipy( + test_analyze.TestAnalyzeImage.test_dtype_to_filename_arg + ) test_header_scaling = needs_scipy(ImageScalingMixin.test_header_scaling) test_int_int_scaling = needs_scipy(ImageScalingMixin.test_int_int_scaling) test_write_scaling = needs_scipy(ImageScalingMixin.test_write_scaling) @@ -441,6 +427,7 @@ def test_mat_read(self): # the saved mat file mat_fileobj = img.file_map['mat'].fileobj from scipy.io import loadmat, savemat + mat_fileobj.seek(0) mats = loadmat(mat_fileobj) assert 'M' in mats and 'mat' in mats @@ -458,21 +445,18 @@ def test_mat_read(self): flipper = np.diag([-1, 1, 1, 1]) assert_array_equal(mats['M'], np.dot(aff, np.dot(flipper, from_111))) mat_fileobj.seek(0) - savemat(mat_fileobj, - dict(M=np.diag([3, 4, 5, 1]), mat=np.diag([6, 7, 8, 1]))) + savemat(mat_fileobj, dict(M=np.diag([3, 4, 5, 1]), mat=np.diag([6, 7, 8, 1]))) # Check we are preferring the 'mat' matrix r_img = img_klass.from_file_map(fm) assert_array_equal(r_img.get_fdata(), arr) - assert_array_equal(r_img.affine, - np.dot(np.diag([6, 7, 8, 1]), to_111)) + assert_array_equal(r_img.affine, np.dot(np.diag([6, 7, 8, 1]), to_111)) # But will use M if present mat_fileobj.seek(0) mat_fileobj.truncate(0) savemat(mat_fileobj, dict(M=np.diag([3, 4, 5, 1]))) r_img = img_klass.from_file_map(fm) assert_array_equal(r_img.get_fdata(), arr) - assert_array_equal(r_img.affine, - np.dot(np.diag([3, 4, 5, 1]), np.dot(flipper, to_111))) + assert_array_equal(r_img.affine, np.dot(np.diag([3, 4, 5, 1]), np.dot(flipper, to_111))) def test_none_affine(self): # Allow for possibility of no affine resulting in nothing written into @@ -499,29 +483,41 @@ def test_origin_affine(): assert hdr.default_x_flip assert_array_almost_equal( hdr.get_origin_affine(), # from center of image - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr['origin'][:3] = [3, 4, 5] assert_array_almost_equal( hdr.get_origin_affine(), # using origin - [[-3., 0., 0., 6.], - [0., 2., 0., -6.], - [0., 0., 1., -4.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 6.0], + [0.0, 2.0, 0.0, -6.0], + [0.0, 0.0, 1.0, -4.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr['origin'] = 0 # unset origin hdr.set_data_shape((3, 5)) assert_array_almost_equal( hdr.get_origin_affine(), - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -0.], - [0., 0., 0., 1.]]) + 
[ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -0.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr.set_data_shape((3, 5, 7)) assert_array_almost_equal( hdr.get_origin_affine(), # from center of image - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 6b7a25ceb2..11a46bafdb 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -1,4 +1,4 @@ -""" Tests for warnings context managers +"""Tests for warnings context managers """ import sys @@ -7,11 +7,19 @@ import numpy as np -from ..testing import (error_warnings, suppress_warnings, - clear_and_catch_warnings, assert_allclose_safely, - get_fresh_mod, assert_re_in, test_data, data_path) +from ..testing import ( + error_warnings, + suppress_warnings, + clear_and_catch_warnings, + assert_allclose_safely, + get_fresh_mod, + assert_re_in, + test_data, + data_path, +) import pytest + def test_assert_allclose_safely(): # Test the safe version of allclose assert_allclose_safely([1, 1], [1, 1]) @@ -114,6 +122,7 @@ def test_warn_error(): def f(): with error_warnings(): raise ValueError('An error') + with pytest.raises(ValueError): f() @@ -133,33 +142,39 @@ def test_warn_ignore(): def f(): with suppress_warnings(): raise ValueError('An error') + with pytest.raises(ValueError): f() -@pytest.mark.parametrize("regex, entries", [ - [".*", ""], - [".*", ["any"]], - ["ab", "abc"], - # Sufficient to have one entry matching - ["ab", ["", "abc", "laskdjf"]], - # Tuples should be ok too - ["ab", ("", "abc", "laskdjf")], - # Should do match not search - pytest.param("ab", "cab", marks=pytest.mark.xfail), - pytest.param("ab$", "abc", marks=pytest.mark.xfail), - pytest.param("ab$", ["ddd", ""], marks=pytest.mark.xfail), - pytest.param("ab$", ("ddd", ""), marks=pytest.mark.xfail), - #Shouldn't "match" the empty list - pytest.param("", [], marks=pytest.mark.xfail) -]) + +@pytest.mark.parametrize( + 'regex, entries', + [ + ['.*', ''], + ['.*', ['any']], + ['ab', 'abc'], + # Sufficient to have one entry matching + ['ab', ['', 'abc', 'laskdjf']], + # Tuples should be ok too + ['ab', ('', 'abc', 'laskdjf')], + # Should do match not search + pytest.param('ab', 'cab', marks=pytest.mark.xfail), + pytest.param('ab$', 'abc', marks=pytest.mark.xfail), + pytest.param('ab$', ['ddd', ''], marks=pytest.mark.xfail), + pytest.param('ab$', ('ddd', ''), marks=pytest.mark.xfail), + # Shouldn't "match" the empty list + pytest.param('', [], marks=pytest.mark.xfail), + ], +) def test_assert_re_in(regex, entries): assert_re_in(regex, entries) def test_test_data(): assert test_data() == data_path - assert test_data() == os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', 'tests', 'data')) + assert test_data() == os.path.abspath( + os.path.join(os.path.dirname(__file__), '..', 'tests', 'data') + ) for subdir in ('nicom', 'gifti', 'externals'): assert test_data(subdir) == os.path.join(data_path[:-10], subdir, 'tests', 'data') assert os.path.exists(test_data(subdir)) @@ -171,8 +186,10 @@ def test_test_data(): assert not os.path.exists(test_data(None, 'doesnotexist')) - for subdir, fname in [('gifti', 'ascii.gii'), - ('nicom', '0.dcm'), - ('externals', 'example_1.nc'), - (None, 'empty.tck')]: + for subdir, fname in [ + ('gifti', 'ascii.gii'), + ('nicom', '0.dcm'), + ('externals', 'example_1.nc'), + (None, 'empty.tck'), + 
]: assert os.path.exists(test_data(subdir, fname)) diff --git a/nibabel/tests/test_tmpdirs.py b/nibabel/tests/test_tmpdirs.py index c4d119b14f..2c0c5199ce 100644 --- a/nibabel/tests/test_tmpdirs.py +++ b/nibabel/tests/test_tmpdirs.py @@ -1,4 +1,4 @@ -""" Test tmpdirs module """ +"""Test tmpdirs module""" from os import getcwd from os.path import realpath, abspath, dirname, isfile diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index 2ec3e06182..0efddbe8bb 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -1,10 +1,11 @@ -""" Testing tripwire module +"""Testing tripwire module """ from ..tripwire import TripWire, is_tripwire, TripWireError import pytest + def test_is_tripwire(): assert not is_tripwire(object()) assert is_tripwire(TripWire('some message')) @@ -21,4 +22,4 @@ def test_tripwire(): except TripWireError as err: assert isinstance(err, AttributeError) else: - raise RuntimeError("No error raised, but expected") + raise RuntimeError('No error raised, but expected') diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py index fd1109eaff..04e616fedd 100644 --- a/nibabel/tests/test_viewers.py +++ b/nibabel/tests/test_viewers.py @@ -33,9 +33,9 @@ def test_viewer(): # Test viewer plt = optional_package('matplotlib.pyplot')[0] a = np.sin(np.linspace(0, np.pi, 20)) - b = np.sin(np.linspace(0, np.pi*5, 30)) + b = np.sin(np.linspace(0, np.pi * 5, 30)) data = (np.outer(a, b)[..., np.newaxis] * a)[:, :, :, np.newaxis] - data = data * np.array([1., 2.]) # give it a # of volumes > 1 + data = data * np.array([1.0, 2.0]) # give it a # of volumes > 1 v = OrthoSlicer3D(data) assert_array_equal(v.position, (0, 0, 0)) assert 'OrthoSlicer3D' in repr(v) @@ -54,7 +54,7 @@ def test_viewer(): v.cmap = 'hot' v.clim = (0, 3) with pytest.raises(ValueError): - OrthoSlicer3D.clim.fset(v, (0.,)) # bad limits + OrthoSlicer3D.clim.fset(v, (0.0,)) # bad limits with pytest.raises( ( ValueError, # MPL3.5 and lower @@ -90,8 +90,7 @@ def test_viewer(): fig, axes = plt.subplots(1, 4) plt.close(fig) v1 = OrthoSlicer3D(data, axes=axes) - aff = np.array([[0, 1, 0, 3], [-1, 0, 0, 2], [0, 0, 2, 1], [0, 0, 0, 1]], - float) + aff = np.array([[0, 1, 0, 3], [-1, 0, 0, 2], [0, 0, 2, 1], [0, 0, 0, 1]], float) v2 = OrthoSlicer3D(data, affine=aff, axes=axes[:3]) # bad data (not 3+ dim) with pytest.raises(ValueError): diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 3e6ba1bab4..c2104b5b59 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test for volumeutils module """ +"""Test for volumeutils module""" import os from os.path import exists @@ -26,38 +26,42 @@ from ..tmpdirs import InTemporaryDirectory from ..openers import ImageOpener -from ..volumeutils import (array_from_file, - _is_compressed_fobj, - array_to_file, - fname_ext_ul_case, - write_zeros, - seek_tell, - apply_read_scaling, - working_type, - best_write_scale_ftype, - better_float_of, - int_scinter_ftype, - make_dt_codes, - native_code, - shape_zoom_affine, - rec2dict, - _dt_min_max, - _write_data, - _ftype4scaled_finite, - ) +from ..volumeutils import ( + array_from_file, + _is_compressed_fobj, + array_to_file, + fname_ext_ul_case, + write_zeros, + seek_tell, + apply_read_scaling, + working_type, + best_write_scale_ftype, + better_float_of, + int_scinter_ftype, + make_dt_codes, + native_code, + shape_zoom_affine, + rec2dict, + _dt_min_max, + _write_data, + _ftype4scaled_finite, +) from ..openers import Opener, BZ2File -from ..casting import (floor_log2, type_info, OK_FLOATS, shared_range) +from ..casting import floor_log2, type_info, OK_FLOATS, shared_range from ..optpkg import optional_package -from numpy.testing import (assert_array_almost_equal, - assert_array_equal) +from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest -from nibabel.testing import (assert_dt_equal, assert_allclose_safely, - suppress_warnings, error_warnings) +from nibabel.testing import ( + assert_dt_equal, + assert_allclose_safely, + suppress_warnings, + error_warnings, +) -pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") +pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') #: convenience variables for numpy types FLOAT_TYPES = np.sctypes['float'] @@ -73,9 +77,7 @@ def test__is_compressed_fobj(): # _is_compressed helper function with InTemporaryDirectory(): - file_openers = [('', open, False), - ('.gz', gzip.open, True), - ('.bz2', BZ2File, True)] + file_openers = [('', open, False), ('.gz', gzip.open, True), ('.bz2', BZ2File, True)] if HAVE_ZSTD: file_openers += [('.zst', pyzstd.ZstdFile, True)] for ext, opener, compressed in file_openers: @@ -102,9 +104,7 @@ def make_array(n, bytes): openers = [open, gzip.open, BZ2File] if HAVE_ZSTD: openers += [pyzstd.ZstdFile] - for n, opener in itertools.product( - (256, 1024, 2560, 25600), - openers): + for n, opener in itertools.product((256, 1024, 2560, 25600), openers): in_arr = np.arange(n, dtype=dtype) # Write array to file fobj_w = opener(fname, 'wb') @@ -218,18 +218,14 @@ def test_array_from_file_mmap(): def buf_chk(in_arr, out_buf, in_buf, offset): - """ Write contents of in_arr into fileobj, read back, check same """ + """Write contents of in_arr into fileobj, read back, check same""" instr = b' ' * offset + in_arr.tobytes(order='F') out_buf.write(instr) out_buf.flush() if in_buf is None: # we're using in_buf from out_buf out_buf.seek(0) in_buf = out_buf - arr = array_from_file( - in_arr.shape, - in_arr.dtype, - in_buf, - offset) + arr = array_from_file(in_arr.shape, in_arr.dtype, in_buf, offset) return np.allclose(in_arr, arr) @@ -242,8 +238,7 @@ def test_array_from_file_openers(): extensions = ['', '.gz', '.bz2'] if HAVE_ZSTD: extensions += ['.zst'] - for ext, offset in itertools.product(extensions, - (0, 5, 10)): + for ext, offset in itertools.product(extensions, (0, 5, 10)): fname = 'test.bin' + ext with Opener(fname, 'wb') as out_buf: if offset != 0: # avoid https://bugs.python.org/issue16828 @@ -267,10 +262,8 @@ def 
test_array_from_file_reread(): if HAVE_ZSTD: openers += [pyzstd.ZstdFile] for shape, opener, dtt, order in itertools.product( - ((64,), (64, 65), (64, 65, 66)), - openers, - (np.int16, np.float32), - ('F', 'C')): + ((64,), (64, 65), (64, 65, 66)), openers, (np.int16, np.float32), ('F', 'C') + ): n_els = np.prod(shape) in_arr = np.arange(n_els, dtype=dtt).reshape(shape) is_bio = hasattr(opener, 'getvalue') @@ -308,8 +301,7 @@ def test_array_to_file(): ndt = dt.newbyteorder(code) for allow_intercept in (True, False): scale, intercept, mn, mx = _calculate_scale(arr, ndt, allow_intercept) - data_back = write_return(arr, str_io, ndt, - 0, intercept, scale) + data_back = write_return(arr, str_io, ndt, 0, intercept, scale) assert_array_almost_equal(arr, data_back) # Test array-like str_io = BytesIO() @@ -340,8 +332,9 @@ def test_a2f_upscale(): inter = info['min'] str_io = BytesIO() # We need to provide mn, mx for function to be able to calculate upcasting - array_to_file(arr, str_io, np.uint8, intercept=inter, divslope=slope, - mn=info['min'], mx=info['max']) + array_to_file( + arr, str_io, np.uint8, intercept=inter, divslope=slope, mn=info['min'], mx=info['max'] + ) raw = array_from_file(arr.shape, np.uint8, str_io) back = apply_read_scaling(raw, slope, inter) top = back - arr @@ -429,13 +422,11 @@ def test_a2f_nan2zero_scaling(): # Array values including zero before scaling but not after bio = BytesIO() for in_dt, out_dt, zero_in, inter in itertools.product( - FLOAT_TYPES, - IUINT_TYPES, - (True, False), - (0, -100)): + FLOAT_TYPES, IUINT_TYPES, (True, False), (0, -100) + ): in_info = np.finfo(in_dt) out_info = np.iinfo(out_dt) - mx = min(in_info.max, out_info.max * 2., 2**32) + inter + mx = min(in_info.max, out_info.max * 2.0, 2**32) + inter mn = 0 if zero_in or inter else 100 vals = [np.nan] + [mn, mx] nan_arr = np.array(vals, dtype=in_dt) @@ -499,15 +490,21 @@ def test_a2f_big_scalers(): # We need nan2zero=False because we can't represent 0 in the input, given # the scaling and the output range. with suppress_warnings(): # overflow - array_to_file(arr, str_io, np.int8, intercept=np.float32(2**120), - nan2zero=False) + array_to_file(arr, str_io, np.int8, intercept=np.float32(2**120), nan2zero=False) data_back = array_from_file(arr.shape, np.int8, str_io) assert_array_equal(data_back, [-128, -128, 127]) # Scales also if mx, mn specified? Same notes and complaints as for the test # above. str_io.seek(0) - array_to_file(arr, str_io, np.int8, mn=info['min'], mx=info['max'], - intercept=np.float32(2**120), nan2zero=False) + array_to_file( + arr, + str_io, + np.int8, + mn=info['min'], + mx=info['max'], + intercept=np.float32(2**120), + nan2zero=False, + ) data_back = array_from_file(arr.shape, np.int8, str_io) assert_array_equal(data_back, [-128, -128, 127]) # And if slope causes overflow? @@ -518,8 +515,7 @@ def test_a2f_big_scalers(): assert_array_equal(data_back, [-128, 0, 127]) # with mn, mx specified? 
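     # (mn/mx here just restate the data's own finite range, so nothing
     # extra is clipped and the result matches the bare divslope call above)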
str_io.seek(0) - array_to_file(arr, str_io, np.int8, mn=info['min'], mx=info['max'], - divslope=np.float32(0.5)) + array_to_file(arr, str_io, np.int8, mn=info['min'], mx=info['max'], divslope=np.float32(0.5)) data_back = array_from_file(arr.shape, np.int8, str_io) assert_array_equal(data_back, [-128, 0, 127]) @@ -529,13 +525,13 @@ def test_a2f_int_scaling(): arr = np.array([0, 1, 128, 255], dtype=np.uint8) fobj = BytesIO() back_arr = write_return(arr, fobj, np.uint8, intercept=1) - assert_array_equal(back_arr, np.clip(arr - 1., 0, 255)) + assert_array_equal(back_arr, np.clip(arr - 1.0, 0, 255)) back_arr = write_return(arr, fobj, np.uint8, divslope=2) - assert_array_equal(back_arr, np.round(np.clip(arr / 2., 0, 255))) + assert_array_equal(back_arr, np.round(np.clip(arr / 2.0, 0, 255))) back_arr = write_return(arr, fobj, np.uint8, intercept=1, divslope=2) - assert_array_equal(back_arr, np.round(np.clip((arr - 1.) / 2., 0, 255))) + assert_array_equal(back_arr, np.round(np.clip((arr - 1.0) / 2.0, 0, 255))) back_arr = write_return(arr, fobj, np.int16, intercept=1, divslope=2) - assert_array_equal(back_arr, np.round((arr - 1.) / 2.)) + assert_array_equal(back_arr, np.round((arr - 1.0) / 2.0)) def test_a2f_scaled_unscaled(): @@ -543,10 +539,8 @@ def test_a2f_scaled_unscaled(): # without scaling fobj = BytesIO() for in_dtype, out_dtype, intercept, divslope in itertools.product( - NUMERIC_TYPES, - NUMERIC_TYPES, - (0, 0.5, -1, 1), - (1, 0.5, 2)): + NUMERIC_TYPES, NUMERIC_TYPES, (0, 0.5, -1, 1), (1, 0.5, 2) + ): mn_in, mx_in = _dt_min_max(in_dtype) nan_val = np.nan if in_dtype in CFLOAT_TYPES else 10 arr = np.array([mn_in, -1, 0, 1, mx_in, nan_val], dtype=in_dtype) @@ -556,31 +550,28 @@ def test_a2f_scaled_unscaled(): if out_dtype in IUINT_TYPES: nan_fill = np.round(nan_fill) # nan2zero will check whether 0 in scaled to a valid value in output - if (in_dtype in CFLOAT_TYPES and not mn_out <= nan_fill <= mx_out): + if in_dtype in CFLOAT_TYPES and not mn_out <= nan_fill <= mx_out: with pytest.raises(ValueError): - array_to_file(arr, - fobj, - out_dtype=out_dtype, - divslope=divslope, - intercept=intercept) + array_to_file( + arr, fobj, out_dtype=out_dtype, divslope=divslope, intercept=intercept + ) continue with suppress_warnings(): - back_arr = write_return(arr, fobj, - out_dtype=out_dtype, - divslope=divslope, - intercept=intercept) + back_arr = write_return( + arr, fobj, out_dtype=out_dtype, divslope=divslope, intercept=intercept + ) exp_back = arr.copy() - if (in_dtype in IUINT_TYPES and - out_dtype in IUINT_TYPES and - (intercept, divslope) == (0, 1)): + if ( + in_dtype in IUINT_TYPES + and out_dtype in IUINT_TYPES + and (intercept, divslope) == (0, 1) + ): # Direct iu to iu casting. # Need to clip if ranges not the same. # Use smaller of input, output range to avoid np.clip upcasting # the array because of large clip limits. 
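            # (max of the mins / min of the maxes keeps both limits within
            # the array's own dtype, so exp_back's dtype is not promoted)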
if (mn_in, mx_in) != (mn_out, mx_out): - exp_back = np.clip(exp_back, - max(mn_in, mn_out), - min(mx_in, mx_out)) + exp_back = np.clip(exp_back, max(mn_in, mn_out), min(mx_in, mx_out)) else: # Need to deal with nans, casting to float, clipping if in_dtype in CFLOAT_TYPES and out_dtype in IUINT_TYPES: exp_back[np.isnan(exp_back)] = 0 @@ -590,8 +581,7 @@ def test_a2f_scaled_unscaled(): exp_back -= intercept if divslope != 1: exp_back /= divslope - if (exp_back.dtype.type in CFLOAT_TYPES and - out_dtype in IUINT_TYPES): + if exp_back.dtype.type in CFLOAT_TYPES and out_dtype in IUINT_TYPES: exp_back = np.round(exp_back).astype(float) exp_back = np.clip(exp_back, *shared_range(float, out_dtype)) exp_back = exp_back.astype(out_dtype) @@ -611,40 +601,32 @@ def test_a2f_nanpos(): def test_a2f_bad_scaling(): # Test that pathological scalers raise an error - NUMERICAL_TYPES = sum([np.sctypes[key] for key in ['int', - 'uint', - 'float', - 'complex']], - []) + NUMERICAL_TYPES = sum([np.sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []) for in_type, out_type, slope, inter in itertools.product( - NUMERICAL_TYPES, - NUMERICAL_TYPES, - (None, 1, 0, np.nan, -np.inf, np.inf), - (0, np.nan, -np.inf, np.inf)): + NUMERICAL_TYPES, + NUMERICAL_TYPES, + (None, 1, 0, np.nan, -np.inf, np.inf), + (0, np.nan, -np.inf, np.inf), + ): arr = np.ones((2,), dtype=in_type) fobj = BytesIO() cm = error_warnings() - if (np.issubdtype(in_type, np.complexfloating) and - not np.issubdtype(out_type, np.complexfloating)): + if np.issubdtype(in_type, np.complexfloating) and not np.issubdtype( + out_type, np.complexfloating + ): cm = pytest.warns(np.ComplexWarning) if (slope, inter) == (1, 0): with cm: - assert_array_equal(arr, - write_return(arr, fobj, out_type, - intercept=inter, - divslope=slope)) + assert_array_equal( + arr, write_return(arr, fobj, out_type, intercept=inter, divslope=slope) + ) elif (slope, inter) == (None, 0): - assert_array_equal(0, - write_return(arr, fobj, out_type, - intercept=inter, - divslope=slope)) + assert_array_equal( + 0, write_return(arr, fobj, out_type, intercept=inter, divslope=slope) + ) else: with pytest.raises(ValueError): - array_to_file(arr, - fobj, - np.int8, - intercept=inter, - divslope=slope) + array_to_file(arr, fobj, np.int8, intercept=inter, divslope=slope) def test_a2f_nan2zero_range(): @@ -664,8 +646,9 @@ def test_a2f_nan2zero_range(): # Pushing zero outside the output data range does not generate error back_arr = write_return(arr_no_nan, fobj, np.int8, intercept=129, nan2zero=True) assert_array_equal([-128, -128, -128, -127], back_arr) - back_arr = write_return(arr_no_nan, fobj, np.int8, - intercept=257.1, divslope=2, nan2zero=True) + back_arr = write_return( + arr_no_nan, fobj, np.int8, intercept=257.1, divslope=2, nan2zero=True + ) assert_array_equal([-128, -128, -128, -128], back_arr) for dt in CFLOAT_TYPES: arr = np.array([-1, 0, 1, np.nan], dtype=dt) @@ -678,12 +661,10 @@ def test_a2f_nan2zero_range(): # No errors from explicit thresholding # mn thresholding excluding zero with pytest.warns(complex_warn) if complex_warn else error_warnings(): - assert_array_equal([1, 1, 1, 0], - write_return(arr, fobj, np.int8, mn=1)) + assert_array_equal([1, 1, 1, 0], write_return(arr, fobj, np.int8, mn=1)) # mx thresholding excluding zero with pytest.warns(complex_warn) if complex_warn else error_warnings(): - assert_array_equal([-1, -1, -1, 0], - write_return(arr, fobj, np.int8, mx=-1)) + assert_array_equal([-1, -1, -1, 0], write_return(arr, fobj, np.int8, mx=-1)) # Errors from 
datatype threshold after scaling with pytest.warns(complex_warn) if complex_warn else error_warnings(): back_arr = write_return(arr, fobj, np.int8, intercept=128) @@ -708,8 +689,9 @@ def test_a2f_nan2zero_range(): write_return(arr_no_nan, fobj, np.int8, intercept=257.1, divslope=2) # OK with nan2zero false with pytest.warns(c_and_n_warn) if c_and_n_warn else error_warnings(): - back_arr = write_return(arr, fobj, np.int8, - intercept=257.1, divslope=2, nan2zero=False) + back_arr = write_return( + arr, fobj, np.int8, intercept=257.1, divslope=2, nan2zero=False + ) assert_array_equal([-128, -128, -128, nan_cast], back_arr) @@ -769,7 +751,7 @@ def test_apply_scaling(): assert (i16_arr * big).dtype == np.float32 # An equivalent case is a little hard to find for the intercept nmant_32 = type_info(np.float32)['nmant'] - big_delta = np.float32(2**(floor_log2(big) - nmant_32)) + big_delta = np.float32(2 ** (floor_log2(big) - nmant_32)) assert (i16_arr * big_delta + big).dtype == np.float32 # Upcasting does occur with this routine assert apply_read_scaling(i16_arr, big).dtype == np.float64 @@ -783,10 +765,8 @@ def test_apply_scaling(): assert apply_read_scaling(np.int8(0), f32(-1e38), f32(0.0)).dtype == np.float64 # Non-zero intercept still generates floats assert_dt_equal(apply_read_scaling(i16_arr, 1.0, 1.0).dtype, float) - assert_dt_equal(apply_read_scaling( - np.zeros((1,), dtype=np.int32), 1.0, 1.0).dtype, float) - assert_dt_equal(apply_read_scaling( - np.zeros((1,), dtype=np.int64), 1.0, 1.0).dtype, float) + assert_dt_equal(apply_read_scaling(np.zeros((1,), dtype=np.int32), 1.0, 1.0).dtype, float) + assert_dt_equal(apply_read_scaling(np.zeros((1,), dtype=np.int64), 1.0, 1.0).dtype, float) def test_apply_read_scaling_ints(): @@ -799,7 +779,7 @@ def test_apply_read_scaling_ints(): def test_apply_read_scaling_nones(): # Check that we can pass None as slope and inter to apply read scaling - arr=np.arange(10, dtype=np.int16) + arr = np.arange(10, dtype=np.int16) assert_array_equal(apply_read_scaling(arr, None, None), arr) assert_array_equal(apply_read_scaling(arr, 2, None), arr * 2) assert_array_equal(apply_read_scaling(arr, None, 1), arr + 1) @@ -819,6 +799,7 @@ def test_working_type(): # need this because of the very confusing np.int32 != np.intp (on 32 bit). 
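     # (np.intp is the pointer-sized C integer, so on 32-bit builds it has
     # the same size as np.int32 while remaining a distinct type; comparing
     # dtype ``.str`` values sidesteps that aliasing)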
def wt(*args, **kwargs): return np.dtype(working_type(*args, **kwargs)).str + d1 = np.atleast_1d for in_type in NUMERIC_TYPES: in_ts = np.dtype(in_type).str @@ -851,6 +832,7 @@ def test_better_float(): # Better float function def check_against(f1, f2): return f1 if FLOAT_TYPES.index(f1) >= FLOAT_TYPES.index(f2) else f2 + for first in FLOAT_TYPES: for other in IUINT_TYPES + np.sctypes['complex']: assert better_float_of(first, other) == first @@ -884,7 +866,7 @@ def test_best_write_scale_ftype(): L_info = type_info(lower_t) t_max = L_info['max'] nmant = L_info['nmant'] # number of significand digits - big_delta = lower_t(2**(floor_log2(t_max) - nmant)) # delta below max + big_delta = lower_t(2 ** (floor_log2(t_max) - nmant)) # delta below max # Even large values that don't overflow don't change output arr = np.array([0, t_max], dtype=lower_t) assert best_write_scale_ftype(arr, 1, 0) == lower_t @@ -995,9 +977,9 @@ def test_seek_tell_logic(): assert bio.tell() == 10 class BabyBio(BytesIO): - def seek(self, *args): raise OSError() + bio = BabyBio() # Fresh fileobj, position 0, can't seek - error with pytest.raises(OSError): @@ -1044,22 +1026,19 @@ def test_shape_zoom_affine(): shape = (3, 5, 7) zooms = (3, 2, 1) res = shape_zoom_affine(shape, zooms) - exp = np.array([[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + exp = np.array( + [[-3.0, 0.0, 0.0, 3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -3.0], [0.0, 0.0, 0.0, 1.0]] + ) assert_array_almost_equal(res, exp) res = shape_zoom_affine((3, 5), (3, 2)) - exp = np.array([[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -0.], - [0., 0., 0., 1.]]) + exp = np.array( + [[-3.0, 0.0, 0.0, 3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -0.0], [0.0, 0.0, 0.0, 1.0]] + ) assert_array_almost_equal(res, exp) res = shape_zoom_affine(shape, zooms, False) - exp = np.array([[3., 0., 0., -3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + exp = np.array( + [[3.0, 0.0, 0.0, -3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -3.0], [0.0, 0.0, 0.0, 1.0]] + ) assert_array_almost_equal(res, exp) @@ -1096,12 +1075,10 @@ def test_dtypes(): dtr = make_dt_codes(dt_defs) assert dtr[np.dtype('f4').newbyteorder('S')] == 16 assert dtr.value_set() == set((16,)) - assert dtr.fields == ('code', 'label', 'type', 'niistring', 'dtype', - 'sw_dtype') + assert dtr.fields == ('code', 'label', 'type', 'niistring', 'dtype', 'sw_dtype') assert dtr.niistring[16] == 'ASTRING' # And that unequal elements raises error - dt_defs = ((16, 'float32', np.float32, 'ASTRING'), - (16, 'float32', np.float32)) + dt_defs = ((16, 'float32', np.float32, 'ASTRING'), (16, 'float32', np.float32)) with pytest.raises(ValueError): make_dt_codes(dt_defs) # And that 2 or 5 elements raises error @@ -1117,16 +1094,18 @@ def test__write_data(): # Test private utility function for writing data itp = itertools.product - def assert_rt(data, - shape, - out_dtype, - order='F', - in_cast=None, - pre_clips=None, - inter=0., - slope=1., - post_clips=None, - nan_fill=None): + def assert_rt( + data, + shape, + out_dtype, + order='F', + in_cast=None, + pre_clips=None, + inter=0.0, + slope=1.0, + post_clips=None, + nan_fill=None, + ): sio = BytesIO() to_write = data.reshape(shape) # to check that we didn't modify in-place @@ -1134,11 +1113,11 @@ def assert_rt(data, nan_positions = np.isnan(to_write) have_nans = np.any(nan_positions) if have_nans and nan_fill is None and not out_dtype.type == 'f': - raise ValueError("Cannot handle this case") - _write_data(to_write, sio, 
out_dtype, order, in_cast, pre_clips, inter, - slope, post_clips, nan_fill) - arr = np.ndarray(shape, out_dtype, buffer=sio.getvalue(), - order=order) + raise ValueError('Cannot handle this case') + _write_data( + to_write, sio, out_dtype, order, in_cast, pre_clips, inter, slope, post_clips, nan_fill + ) + arr = np.ndarray(shape, out_dtype, buffer=sio.getvalue(), order=order) expected = to_write.copy() if have_nans and not nan_fill is None: expected[nan_positions] = nan_fill * slope + inter @@ -1147,37 +1126,51 @@ def assert_rt(data, # check shape writing for shape, order in itp( - ((24,), (24, 1), (24, 1, 1), (1, 24), (1, 1, 24), (2, 3, 4), - (6, 1, 4), (1, 6, 4), (6, 4, 1)), - 'FC'): + ( + (24,), + (24, 1), + (24, 1, 1), + (1, 24), + (1, 1, 24), + (2, 3, 4), + (6, 1, 4), + (1, 6, 4), + (6, 4, 1), + ), + 'FC', + ): assert_rt(np.arange(24), shape, np.int16, order=order) # check defense against modifying data in-place for in_cast, pre_clips, inter, slope, post_clips, nan_fill in itp( - (None, np.float32), - (None, (-1, 25)), - (0., 1.), - (1., 0.5), - (None, (-2, 49)), - (None, 1)): + (None, np.float32), (None, (-1, 25)), (0.0, 1.0), (1.0, 0.5), (None, (-2, 49)), (None, 1) + ): data = np.arange(24).astype(np.float32) - assert_rt(data, shape, np.int16, - in_cast=in_cast, - pre_clips=pre_clips, - inter=inter, - slope=slope, - post_clips=post_clips, - nan_fill=nan_fill) + assert_rt( + data, + shape, + np.int16, + in_cast=in_cast, + pre_clips=pre_clips, + inter=inter, + slope=slope, + post_clips=post_clips, + nan_fill=nan_fill, + ) # Check defense against in-place modification with nans present if not nan_fill is None: data[1] = np.nan - assert_rt(data, shape, np.int16, - in_cast=in_cast, - pre_clips=pre_clips, - inter=inter, - slope=slope, - post_clips=post_clips, - nan_fill=nan_fill) + assert_rt( + data, + shape, + np.int16, + in_cast=in_cast, + pre_clips=pre_clips, + inter=inter, + slope=slope, + post_clips=post_clips, + nan_fill=nan_fill, + ) def test_array_from_file_overflow(): @@ -1185,18 +1178,20 @@ def test_array_from_file_overflow(): shape = (1500,) * 6 class NoStringIO: # Null file-like for forcing error - def seek(self, n_bytes): pass def read(self, n_bytes): return b'' + try: array_from_file(shape, np.int8, NoStringIO()) except OSError as err: message = str(err) - assert message == ("Expected 11390625000000000000 bytes, got 0 " - "bytes from object\n - could the file be damaged?") + assert message == ( + 'Expected 11390625000000000000 bytes, got 0 ' + 'bytes from object\n - could the file be damaged?' + ) def test__ftype4scaled_finite_warningfilters(): @@ -1249,7 +1244,7 @@ def run(self): def _calculate_scale(data, out_dtype, allow_intercept): - """ Calculate scaling and optional intercept for data + """Calculate scaling and optional intercept for data Copy of the deprecated volumeutils.calculate_scale, to preserve tests @@ -1280,6 +1275,7 @@ def _calculate_scale(data, out_dtype, allow_intercept): if np.can_cast(in_dtype, out_dtype): return 1.0, 0.0, None, None from ..arraywriters import make_array_writer, WriterError, get_slope_inter + try: writer = make_array_writer(data, out_dtype, True, allow_intercept) except WriterError as e: diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index a360804f5a..2e4ea6a788 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test binary header objects +"""Test binary header objects This is a root testing class, used in the Analyze and other tests as a framework for all the tests common to the Analyze types @@ -42,8 +42,9 @@ INTEGER_TYPES = np.sctypes['int'] + np.sctypes['uint'] + def log_chk(hdr, level): - """ Utility method to check header checking / logging + """Utility method to check header checking / logging Asserts that log entry appears during ``hdr.check_fix`` for logging level below `level`. @@ -99,18 +100,16 @@ def log_chk(hdr, level): logger.removeHandler(handler) # When error level == level, check_fix should raise an error hdrc2 = hdr.copy() - raiser = (HeaderDataError, - hdrc2.check_fix, - logger, - level) + raiser = (HeaderDataError, hdrc2.check_fix, logger, level) return hdrc, message, raiser class _TestWrapStructBase(BaseTestCase): - """ Class implements base tests for binary headers + """Class implements base tests for binary headers It serves as a base class for other binary header tests """ + header_class = None def get_bad_bb(self): @@ -190,10 +189,9 @@ def test_mappingness(self): assert hdr.get(keys[0]) == falsyval assert hdr.get(keys[0], -1) == falsyval - def test_endianness_ro(self): # endianness is a read only property - """ Its use in initialization tested in the init tests. + """Its use in initialization tested in the init tests. Endianness gives endian interpretation of binary data. It is read only because the only common use case is to set the endianness on initialization (or occasionally byteswapping the @@ -237,8 +235,7 @@ def log_chk(self, hdr, level): return log_chk(hdr, level) def assert_no_log_err(self, hdr): - """ Assert that no logging or errors result from this `hdr` - """ + """Assert that no logging or errors result from this `hdr`""" fhdr, message, raiser = self.log_chk(hdr, 0) assert (fhdr, message) == (hdr, '') @@ -286,9 +283,9 @@ def test_as_byteswapped(self): # Note that contents is not rechecked on swap / copy class DC(self.header_class): - def check_fix(self, *args, **kwargs): raise Exception + # Assumes check=True default with pytest.raises(Exception): DC(hdr.binaryblock) @@ -313,15 +310,15 @@ def test_str(self): assert len(s1) > 0 - class _TestLabeledWrapStruct(_TestWrapStructBase): - """ Test a wrapstruct with value labeling """ + """Test a wrapstruct with value labeling""" def test_get_value_label(self): # Test get value label method # Make a new class to avoid overwriting recoders of original class MyHdr(self.header_class): _field_recoders = {} + hdr = MyHdr() # Key not existing raises error with pytest.raises(ValueError): @@ -351,7 +348,8 @@ class MyHdr(self.header_class): class MyWrapStruct(WrapStruct): - """ An example wrapped struct class """ + """An example wrapped struct class""" + template_dtype = np.dtype([('an_integer', 'i2'), ('a_str', 'S10')]) @classmethod @@ -369,11 +367,11 @@ def default_structarr(klass, endianness=None): @classmethod def _get_checks(klass): - """ Return sequence of check functions for this class """ - return (klass._chk_integer, - klass._chk_string) + """Return sequence of check functions for this class""" + return (klass._chk_integer, klass._chk_string) """ Check functions in format expected by BatteryRunner class """ + @staticmethod def _chk_integer(hdr, fix=False): rep = Report(HeaderDataError) @@ -405,7 +403,8 @@ class MyLabeledWrapStruct(LabeledWrapStruct, MyWrapStruct): class TestMyWrapStruct(_TestWrapStructBase): - """ Test fake binary 
header defined at top of module """ + """Test fake binary header defined at top of module""" + header_class = MyWrapStruct def get_bad_bb(self): @@ -515,6 +514,7 @@ def test_str(self): # Make sure not to overwrite class dictionary class MyHdr(self.header_class): _field_recoders = {} + hdr = MyHdr() s1 = str(hdr) assert len(s1) > 0 diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 10b5ee78f5..c175940ff7 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Contexts for *with* statement providing temporary directories +"""Contexts for *with* statement providing temporary directories """ import os import shutil @@ -31,7 +31,7 @@ class TemporaryDirectory: False """ - def __init__(self, suffix="", prefix=template, dir=None): + def __init__(self, suffix='', prefix=template, dir=None): self.name = mkdtemp(suffix, prefix, dir) self._closed = False @@ -49,7 +49,7 @@ def __exit__(self, exc, value, tb): class InTemporaryDirectory(TemporaryDirectory): - """ Create, return, and change directory to a temporary directory + """Create, return, and change directory to a temporary directory Notes ------ @@ -82,7 +82,7 @@ def __exit__(self, exc, value, tb): class InGivenDirectory: - """ Change directory to given directory for duration of ``with`` block + """Change directory to given directory for duration of ``with`` block Useful when you want to use `InTemporaryDirectory` for the final test, but you are still debugging. For example, you may want to do this in the end: @@ -106,7 +106,7 @@ class InGivenDirectory: """ def __init__(self, path=None): - """ Initialize directory context manager + """Initialize directory context manager Parameters ---------- diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index db659df337..3b6ecfbb40 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -1,9 +1,10 @@ -""" Class to raise error for missing modules or other misfortunes +"""Class to raise error for missing modules or other misfortunes """ class TripWireError(AttributeError): - """ Exception if trying to use TripWire object """ + """Exception if trying to use TripWire object""" + # Has to be subclass of AttributeError, to work round Python 3.5 inspection # for doctests. Python 3.5 looks for a ``__wrapped__`` attribute during # initialization of doctests, and only allows AttributeError as signal this @@ -11,7 +12,7 @@ class TripWireError(AttributeError): def is_tripwire(obj): - """ Returns True if `obj` appears to be a TripWire object + """Returns True if `obj` appears to be a TripWire object Examples -------- @@ -30,7 +31,7 @@ def is_tripwire(obj): class TripWire: - """ Class raising error if used + """Class raising error if used Standard use is to proxy modules that we could not import @@ -47,5 +48,5 @@ def __init__(self, msg): self._msg = msg def __getattr__(self, attr_name): - """ Raise informative error accessing attributes """ + """Raise informative error accessing attributes""" raise TripWireError(self._msg) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 65e813ef0f..c3720d474b 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -1,4 +1,4 @@ -""" Utilities for viewing images +"""Utilities for viewing images Includes version of OrthoSlicer3D code originally written by our own Paul Ivanov. @@ -13,7 +13,7 @@ class OrthoSlicer3D: - """ Orthogonal-plane slice viewer. + """Orthogonal-plane slice viewer. 
OrthoSlicer3d expects 3- or 4-dimensional array data. It treats 4D data as a sequence of 3D spatial volumes, where a slice over the final @@ -39,6 +39,7 @@ class OrthoSlicer3D: >>> data = np.outer(a, b)[..., np.newaxis] * a >>> OrthoSlicer3D(data).show() # doctest: +SKIP """ + # Skip doctest above b/c not all systems have mpl installed def __init__(self, data, affine=None, axes=None, title=None): @@ -74,7 +75,7 @@ def __init__(self, data, affine=None, axes=None, title=None): if data.ndim < 3: raise ValueError('data must have at least 3 dimensions') if np.iscomplexobj(data): - raise TypeError("Complex data not supported") + raise TypeError('Complex data not supported') affine = np.array(affine, float) if affine is not None else np.eye(4) if affine.shape != (4, 4): raise ValueError('affine must be a 4x4 matrix') @@ -90,7 +91,7 @@ def __init__(self, data, affine=None, axes=None, title=None): self._volume_dims = data.shape[3:] self._current_vol_data = data[:, :, :, 0] if data.ndim > 3 else data self._data = data - self._clim = np.percentile(data, (1., 99.)) + self._clim = np.percentile(data, (1.0, 99.0)) del data if axes is None: # make the axes @@ -130,36 +131,53 @@ def __init__(self, data, affine=None, axes=None, title=None): # set up axis crosshairs self._crosshairs = [None] * 3 - r = [self._scalers[self._order[2]] / self._scalers[self._order[1]], - self._scalers[self._order[2]] / self._scalers[self._order[0]], - self._scalers[self._order[1]] / self._scalers[self._order[0]]] + r = [ + self._scalers[self._order[2]] / self._scalers[self._order[1]], + self._scalers[self._order[2]] / self._scalers[self._order[0]], + self._scalers[self._order[1]] / self._scalers[self._order[0]], + ] self._sizes = [self._data.shape[order] for order in self._order] - for ii, xax, yax, ratio, label in zip([0, 1, 2], [1, 0, 0], [2, 2, 1], - r, ('SAIP', 'SRIL', 'ARPL')): + for ii, xax, yax, ratio, label in zip( + [0, 1, 2], [1, 0, 0], [2, 2, 1], r, ('SAIP', 'SRIL', 'ARPL') + ): ax = self._axes[ii] d = np.zeros((self._sizes[yax], self._sizes[xax])) im = self._axes[ii].imshow( - d, vmin=self._clim[0], vmax=self._clim[1], aspect=1, - cmap='gray', interpolation='nearest', origin='lower') + d, + vmin=self._clim[0], + vmax=self._clim[1], + aspect=1, + cmap='gray', + interpolation='nearest', + origin='lower', + ) self._ims.append(im) - vert = ax.plot([0] * 2, [-0.5, self._sizes[yax] - 0.5], - color=(0, 1, 0), linestyle='-')[0] - horiz = ax.plot([-0.5, self._sizes[xax] - 0.5], [0] * 2, - color=(0, 1, 0), linestyle='-')[0] + vert = ax.plot( + [0] * 2, [-0.5, self._sizes[yax] - 0.5], color=(0, 1, 0), linestyle='-' + )[0] + horiz = ax.plot( + [-0.5, self._sizes[xax] - 0.5], [0] * 2, color=(0, 1, 0), linestyle='-' + )[0] self._crosshairs[ii] = dict(vert=vert, horiz=horiz) # add text labels (top, right, bottom, left) lims = [0, self._sizes[xax], 0, self._sizes[yax]] bump = 0.01 - poss = [[lims[1] / 2., lims[3]], - [(1 + bump) * lims[1], lims[3] / 2.], - [lims[1] / 2., 0], - [lims[0] - bump * lims[1], lims[3] / 2.]] - anchors = [['center', 'bottom'], ['left', 'center'], - ['center', 'top'], ['right', 'center']] + poss = [ + [lims[1] / 2.0, lims[3]], + [(1 + bump) * lims[1], lims[3] / 2.0], + [lims[1] / 2.0, 0], + [lims[0] - bump * lims[1], lims[3] / 2.0], + ] + anchors = [ + ['center', 'bottom'], + ['left', 'center'], + ['center', 'top'], + ['right', 'center'], + ] for pos, anchor, lab in zip(poss, anchors, label): - ax.text(pos[0], pos[1], lab, - horizontalalignment=anchor[0], - verticalalignment=anchor[1]) + ax.text( + pos[0], 
pos[1], lab, horizontalalignment=anchor[0], verticalalignment=anchor[1] + ) ax.axis(lims) ax.set_aspect(ratio) ax.patch.set_visible(False) @@ -180,14 +198,19 @@ def __init__(self, data, affine=None, axes=None, title=None): y = np.zeros(self.n_volumes + 1) x = np.arange(self.n_volumes + 1) - 0.5 step = ax.step(x, y, where='post', color='y')[0] - ax.set_xticks(np.unique(np.linspace(0, self.n_volumes - 1, - 5).astype(int))) + ax.set_xticks(np.unique(np.linspace(0, self.n_volumes - 1, 5).astype(int))) ax.set_xlim(x[0], x[-1]) yl = [self._data.min(), self._data.max()] yl = [lim + s * np.diff(lims)[0] for lim, s in zip(yl, [-1.01, 1.01])] - patch = mpl_patch.Rectangle([-0.5, yl[0]], 1., np.diff(yl)[0], - fill=True, facecolor=(0, 1, 0), - edgecolor=(0, 1, 0), alpha=0.25) + patch = mpl_patch.Rectangle( + [-0.5, yl[0]], + 1.0, + np.diff(yl)[0], + fill=True, + facecolor=(0, 1, 0), + edgecolor=(0, 1, 0), + alpha=0.25, + ) ax.add_patch(patch) ax.set_ylim(yl) self._volume_ax_objs = dict(step=step, patch=patch) @@ -202,32 +225,32 @@ def __init__(self, data, affine=None, axes=None, title=None): # actually set data meaningfully self._position = np.zeros(4) - self._position[3] = 1. # convenience for affine multiplication + self._position[3] = 1.0 # convenience for affine multiplication self._changing = False # keep track of status to avoid loops self._links = [] # other viewers this one is linked to self._plt.draw() for fig in self._figs: fig.canvas.draw() self._set_volume_index(0, update_slices=False) - self._set_position(0., 0., 0.) + self._set_position(0.0, 0.0, 0.0) self._draw() def __repr__(self): title = '' if self._title is None else f'{self._title} ' vol = '' if self.n_volumes <= 1 else f', {self.n_volumes}' - r = (f'<{self.__class__.__name__}: {title}({self._sizes[0]}, ' - f'{self._sizes[1]}, {self._sizes[2]}{vol})>') + r = ( + f'<{self.__class__.__name__}: {title}({self._sizes[0]}, ' + f'{self._sizes[1]}, {self._sizes[2]}{vol})>' + ) return r # User-level functions ################################################### def show(self): - """Show the slicer in blocking mode; convenience for ``plt.show()`` - """ + """Show the slicer in blocking mode; convenience for ``plt.show()``""" self._plt.show() def close(self): - """Close the viewer figures - """ + """Close the viewer figures""" self._cleanup() for f in self._figs: self._plt.close(f) @@ -294,8 +317,9 @@ def link_to(self, other): Other viewer to use to link movements. 
""" if not isinstance(other, self.__class__): - raise TypeError('other must be an instance of ' - f'{self.__class__.__name__}, not {type(other)}') + raise TypeError( + 'other must be an instance of ' f'{self.__class__.__name__}, not {type(other)}' + ) self._link(other, is_primary=True) def _link(self, other, is_primary): @@ -355,8 +379,7 @@ def _set_volume_index(self, v, update_slices=True): self._data_idx[3] = max(min(int(round(v)), max_ - 1), 0) idx = (slice(None), slice(None), slice(None)) if self._data.ndim > 3: - idx = idx + tuple(np.unravel_index(self._data_idx[3], - self._volume_dims)) + idx = idx + tuple(np.unravel_index(self._data_idx[3], self._volume_dims)) self._current_vol_data = self._data[idx] # update all of our slice plots if update_slices: @@ -381,8 +404,7 @@ def _set_position(self, x, y, z, notify=True): # sagittal: get to S/A # coronal: get to S/L # axial: get to A/L - data = np.rollaxis(self._current_vol_data, - axis=self._order[ii])[self._data_idx[ii]] + data = np.rollaxis(self._current_vol_data, axis=self._order[ii])[self._data_idx[ii]] xax = [1, 0, 0][ii] yax = [2, 2, 1][ii] if self._order[xax] < self._order[yax]: @@ -440,14 +462,14 @@ def _on_scroll(self, event): return ii = 3 # shift: change volume in any axis assert ii in range(4) - dv = 10. if event.key is not None and 'control' in event.key else 1. - dv *= 1. if event.button == 'up' else -1. + dv = 10.0 if event.key is not None and 'control' in event.key else 1.0 + dv *= 1.0 if event.button == 'up' else -1.0 dv *= -1 if self._flips[ii] else 1 val = self._data_idx[ii] + dv if ii == 3: self._set_volume_index(val) else: - coords = [self._data_idx[k] for k in range(3)] + [1.] + coords = [self._data_idx[k] for k in range(3)] + [1.0] coords[ii] = val self._set_position(*np.dot(self._affine, coords)[:3]) self._draw() @@ -468,7 +490,7 @@ def _on_mouse(self, event): x, y = event.xdata, event.ydata x = self._sizes[xax] - x if self._flips[xax] else x y = self._sizes[yax] - y if self._flips[yax] else y - idxs = [None, None, None, 1.] + idxs = [None, None, None, 1.0] idxs[xax] = x idxs[yax] = y idxs[ii] = self._data_idx[ii] @@ -479,7 +501,7 @@ def _on_keypress(self, event): """Handle mpl keypress events""" if event.key is not None and 'escape' in event.key: self.close() - elif event.key in ["=", '+']: + elif event.key in ['=', '+']: # increment volume index new_idx = min(self._data_idx[3] + 1, self.n_volumes) self._set_volume_index(new_idx, update_slices=True) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 7f18c20f3f..f026750e95 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Utility functions for analyze-like formats """ +"""Utility functions for analyze-like formats""" import sys import warnings @@ -23,7 +23,7 @@ from .externals.oset import OrderedSet from .optpkg import optional_package -pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") +pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') sys_is_le = sys.byteorder == 'little' native_code = sys_is_le and '<' or '>' @@ -33,7 +33,8 @@ ('<', 'little', 'l', 'le', 'L', 'LE'), ('>', 'big', 'BIG', 'b', 'be', 'B', 'BE'), (native_code, 'native', 'n', 'N', '=', '|', 'i', 'I'), - (swapped_code, 'swapped', 's', 'S', '!')) + (swapped_code, 'swapped', 's', 'S', '!'), +) # We'll put these into the Recoder class after we define it #: default compression level when writing gz and bz2 files @@ -48,7 +49,7 @@ class Recoder: - """ class to return canonical code(s) from code or aliases + """class to return canonical code(s) from code or aliases The concept is a lot easier to read in the implementation and tests than it is to explain, so... @@ -82,7 +83,7 @@ class Recoder: """ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): - """ Create recoder object + """Create recoder object ``codes`` give a sequence of code, alias sequences ``fields`` are names by which the entries in these sequences can be @@ -120,7 +121,7 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): self.add_codes(codes) def add_codes(self, code_syn_seqs): - """ Add codes to object + """Add codes to object Parameters ---------- @@ -154,7 +155,7 @@ def add_codes(self, code_syn_seqs): self.__dict__[field_name][alias] = code_syns[field_ind] def __getitem__(self, key): - """ Return value from field1 dictionary (first column of values) + """Return value from field1 dictionary (first column of values) Returns same value as ``obj.field1[key]`` and, with the default initializing ``fields`` argument of fields=('code',), @@ -167,8 +168,7 @@ def __getitem__(self, key): return self.field1[key] def __contains__(self, key): - """ True if field1 in recoder contains `key` - """ + """True if field1 in recoder contains `key`""" try: self.field1[key] except KeyError: @@ -176,7 +176,7 @@ def __contains__(self, key): return True def keys(self): - """ Return all available code and alias values + """Return all available code and alias values Returns same value as ``obj.field1.keys()`` and, with the default initializing ``fields`` argument of fields=('code',), @@ -190,7 +190,7 @@ def keys(self): return self.field1.keys() def value_set(self, name=None): - """ Return OrderedSet of possible returned values for column + """Return OrderedSet of possible returned values for column By default, the column is the first column. @@ -224,7 +224,7 @@ def value_set(self, name=None): class DtypeMapper: - """ Specialized mapper for numpy dtypes + """Specialized mapper for numpy dtypes We pass this mapper into the Recoder class to deal with numpy dtype hashing. 
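For context on the DtypeMapper hunk above: the "numpy dtype hashing" problem is that dtypes compare equal to strings and other dtype-likes that hash differently, so a plain dict lookup can miss a key that is equal to the query. A minimal standalone illustration, not part of this patch:

    import numpy as np

    d = {np.dtype('f4'): 16}
    print(np.dtype('f4') == 'f4')  # True: dtypes compare equal to their aliases
    print(d.get('f4'))             # None: the alias hashes differently, so the lookup misses

Hence, per the docstrings in this hunk, __getitem__ tries a plain hash lookup first and then falls back to comparing the query against the cached dtype keys.
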
@@ -252,7 +252,7 @@ def values(self): return self._dict.values() def __setitem__(self, key, value): - """ Set item into mapping, checking for dtype keys + """Set item into mapping, checking for dtype keys Cache dtype keys for comparison test in __getitem__ """ @@ -261,7 +261,7 @@ def __setitem__(self, key, value): self._dtype_keys.append(key) def __getitem__(self, key): - """ Get item from mapping, checking for dtype keys + """Get item from mapping, checking for dtype keys First do simple hash lookup, then check for a dtype key that has failed the hash lookup. Look then for any known dtype keys that compare equal @@ -279,7 +279,7 @@ def __getitem__(self, key): def pretty_mapping(mapping, getterfunc=None): - """ Make pretty string from mapping + """Make pretty string from mapping Adjusts text column to print values on basis of longest key. Probably only sensible if keys are mainly strings. @@ -339,7 +339,7 @@ def pretty_mapping(mapping, getterfunc=None): def make_dt_codes(codes_seqs): - """ Create full dt codes Recoder instance from datatype codes + """Create full dt codes Recoder instance from datatype codes Include created numpy dtype (from numpy type) and opposite endian numpy dtype @@ -379,13 +379,12 @@ def make_dt_codes(codes_seqs): def _is_compressed_fobj(fobj): - """ Return True if fobj represents a compressed data file-like object - """ + """Return True if fobj represents a compressed data file-like object""" return isinstance(fobj, COMPRESSED_FILE_LIKES) def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): - """ Get array from file with specified shape, dtype and file offset + """Get array from file with specified shape, dtype and file offset Parameters ---------- @@ -428,8 +427,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): True """ if mmap not in (True, False, 'c', 'r', 'r+'): - raise ValueError("mmap value should be one of True, False, 'c', " - "'r', 'r+'") + raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") if mmap is True: mmap = 'c' in_dtype = np.dtype(in_dtype) @@ -437,12 +435,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): infile = getattr(infile, 'fobj', infile) if mmap and not _is_compressed_fobj(infile): try: # Try memmapping file on disk - return np.memmap(infile, - in_dtype, - mode=mmap, - shape=shape, - order=order, - offset=offset) + return np.memmap(infile, in_dtype, mode=mmap, shape=shape, order=order, offset=offset) # The error raised by memmap, for different file types, has # changed in different incarnations of the numpy routine except (AttributeError, TypeError, ValueError): @@ -464,8 +457,10 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): n_read = len(data_bytes) needs_copy = True if n_bytes != n_read: - raise OSError(f"Expected {n_bytes} bytes, got {n_read} bytes from " - f"{getattr(infile, 'name', 'object')}\n - could the file be damaged?") + raise OSError( + f'Expected {n_bytes} bytes, got {n_read} bytes from ' + f"{getattr(infile, 'name', 'object')}\n - could the file be damaged?" 
+ ) arr = np.ndarray(shape, in_dtype, buffer=data_bytes, order=order) if needs_copy: return arr.copy() @@ -473,10 +468,19 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): return arr -def array_to_file(data, fileobj, out_dtype=None, offset=0, - intercept=0.0, divslope=1.0, - mn=None, mx=None, order='F', nan2zero=True): - """ Helper function for writing arrays to file objects +def array_to_file( + data, + fileobj, + out_dtype=None, + offset=0, + intercept=0.0, + divslope=1.0, + mn=None, + mx=None, + order='F', + nan2zero=True, +): + """Helper function for writing arrays to file objects Writes arrays as scaled by `intercept` and `divslope`, and clipped at (prescaling) `mn` minimum, and `mx` maximum. @@ -558,8 +562,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, """ # Shield special case div_none = divslope is None - if not np.all( - np.isfinite((intercept, 1.0 if div_none else divslope))): + if not np.all(np.isfinite((intercept, 1.0 if div_none else divslope))): raise ValueError('divslope and intercept must be finite') if divslope == 0: raise ValueError('divslope cannot be zero') @@ -571,15 +574,14 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, out_dtype = np.dtype(out_dtype) if offset is not None: seek_tell(fileobj, offset) - if (div_none or (mn, mx) == (0, 0) or - ((mn is not None and mx is not None) and mx < mn)): + if div_none or (mn, mx) == (0, 0) or ((mn is not None and mx is not None) and mx < mn): write_zeros(fileobj, data.size * out_dtype.itemsize) return if order not in 'FC': raise ValueError('Order should be one of F or C') # Simple cases pre_clips = None if (mn is None and mx is None) else (mn, mx) - null_scaling = (intercept == 0 and divslope == 1) + null_scaling = intercept == 0 and divslope == 1 if in_dtype.type == np.void: if not null_scaling: raise ValueError('Cannot scale non-numeric types') @@ -589,8 +591,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, if pre_clips is not None: pre_clips = _dt_min_max(in_dtype, *pre_clips) if null_scaling and np.can_cast(in_dtype, out_dtype): - return _write_data(data, fileobj, out_dtype, order, - pre_clips=pre_clips) + return _write_data(data, fileobj, out_dtype, order, pre_clips=pre_clips) # Force upcasting for floats by making atleast_1d. slope, inter = [np.atleast_1d(v) for v in (divslope, intercept)] # Default working point type for applying slope / inter @@ -601,10 +602,9 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, in_kind = in_dtype.kind out_kind = out_dtype.kind if out_kind in 'fc': - return _write_data(data, fileobj, out_dtype, order, - slope=slope, - inter=inter, - pre_clips=pre_clips) + return _write_data( + data, fileobj, out_dtype, order, slope=slope, inter=inter, pre_clips=pre_clips + ) assert out_kind in 'iu' if in_kind in 'iu': if null_scaling: @@ -613,8 +613,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, mn, mx = _dt_min_max(in_dtype, mn, mx) mn_out, mx_out = _dt_min_max(out_dtype) pre_clips = max(mn, mn_out), min(mx, mx_out) - return _write_data(data, fileobj, out_dtype, order, - pre_clips=pre_clips) + return _write_data(data, fileobj, out_dtype, order, pre_clips=pre_clips) # In any case, we do not want to check for nans because we've already # disallowed scaling that generates nans nan2zero = False @@ -677,38 +676,48 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, # slope). Assume errors are for working float type. 
Round for integer # rounding est_err = np.round(2 * np.finfo(w_type).eps * abs(inter / slope)) - if ((nan_fill < both_mn and abs(nan_fill - both_mn) < est_err) or - (nan_fill > both_mx and abs(nan_fill - both_mx) < est_err)): + if (nan_fill < both_mn and abs(nan_fill - both_mn) < est_err) or ( + nan_fill > both_mx and abs(nan_fill - both_mx) < est_err + ): # nan_fill can be (just) outside clip range nan_fill = np.clip(nan_fill, both_mn, both_mx) else: - raise ValueError(f"nan_fill == {nan_fill}, outside safe int range " - f"({int(both_mn)}-{int(both_mx)}); " - "change scaling or set nan2zero=False?") + raise ValueError( + f'nan_fill == {nan_fill}, outside safe int range ' + f'({int(both_mn)}-{int(both_mx)}); ' + 'change scaling or set nan2zero=False?' + ) # Make sure non-nan output clipped to shared range post_mn = np.max([post_mn, both_mn]) post_mx = np.min([post_mx, both_mx]) in_cast = None if cast_in_dtype == in_dtype else cast_in_dtype - return _write_data(data, fileobj, out_dtype, order, - in_cast=in_cast, - pre_clips=pre_clips, - inter=inter, - slope=slope, - post_clips=(post_mn, post_mx), - nan_fill=nan_fill if nan2zero else None) - - -def _write_data(data, - fileobj, - out_dtype, - order, - in_cast=None, - pre_clips=None, - inter=0., - slope=1., - post_clips=None, - nan_fill=None): - """ Write array `data` to `fileobj` as `out_dtype` type, layout `order` + return _write_data( + data, + fileobj, + out_dtype, + order, + in_cast=in_cast, + pre_clips=pre_clips, + inter=inter, + slope=slope, + post_clips=(post_mn, post_mx), + nan_fill=nan_fill if nan2zero else None, + ) + + +def _write_data( + data, + fileobj, + out_dtype, + order, + in_cast=None, + pre_clips=None, + inter=0.0, + slope=1.0, + post_clips=None, + nan_fill=None, +): + """Write array `data` to `fileobj` as `out_dtype` type, layout `order` Does not modify `data` in-place. 
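A note on the scaling that the reformatted hunk above preserves: as the surrounding docstrings describe, array_to_file stores values scaled by `intercept` and `divslope` (subtract, then divide), and apply_read_scaling applies the inverse on read. A minimal standalone round-trip sketch, not part of this patch, using only functions defined in this file:

    from io import BytesIO

    import numpy as np
    from nibabel.volumeutils import array_from_file, array_to_file

    data = np.arange(4, dtype=np.float64)
    bio = BytesIO()
    # stored values are (data - intercept) / divslope
    array_to_file(data, bio, np.float64, intercept=1.0, divslope=2.0)
    back = array_from_file(data.shape, np.float64, bio)
    assert np.allclose(back * 2.0 + 1.0, data)  # read scaling inverts the write scaling
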
@@ -741,8 +750,7 @@ def _write_data(data, data = np.atleast_2d(data) elif order == 'F': data = data.T - nan_need_copy = ((pre_clips, in_cast, inter, slope, post_clips) == - (None, None, 0, 1, None)) + nan_need_copy = (pre_clips, in_cast, inter, slope, post_clips) == (None, None, 0, 1, None) for dslice in data: # cycle over first dimension to save memory if pre_clips is not None: dslice = np.clip(dslice, *pre_clips) @@ -773,20 +781,15 @@ def _dt_min_max(dtype_like, mn=None, mx=None): info = np.iinfo(dt) dt_mn, dt_mx = (info.min, info.max) else: - raise ValueError("unknown dtype") + raise ValueError('unknown dtype') return dt_mn if mn is None else mn, dt_mx if mx is None else mx -_CSIZE2FLOAT = { - 8: np.float32, - 16: np.float64, - 24: np.longdouble, - 32: np.longdouble} +_CSIZE2FLOAT = {8: np.float32, 16: np.float64, 24: np.longdouble, 32: np.longdouble} def _matching_float(np_type): - """ Return floating point type matching `np_type` - """ + """Return floating point type matching `np_type`""" dtype = np.dtype(np_type) if dtype.kind not in 'cf': raise ValueError('Expecting float or complex type as input') @@ -796,7 +799,7 @@ def _matching_float(np_type): def write_zeros(fileobj, count, block_size=8194): - """ Write `count` zero bytes to `fileobj` + """Write `count` zero bytes to `fileobj` Parameters ---------- @@ -816,7 +819,7 @@ def write_zeros(fileobj, count, block_size=8194): def seek_tell(fileobj, offset, write0=False): - """ Seek in `fileobj` or check we're in the right place already + """Seek in `fileobj` or check we're in the right place already Parameters ---------- @@ -846,7 +849,7 @@ def seek_tell(fileobj, offset, write0=False): def apply_read_scaling(arr, slope=None, inter=None): - """ Apply scaling in `slope` and `inter` to array `arr` + """Apply scaling in `slope` and `inter` to array `arr` This is for loading the array from a file (as opposed to the reverse scaling when saving an array to file) @@ -889,7 +892,7 @@ def apply_read_scaling(arr, slope=None, inter=None): # int to float; get enough precision to avoid infs # Find floating point type for which scaling does not overflow, # starting at given type - default = (slope.dtype.type if slope.dtype.kind == 'f' else np.float64) + default = slope.dtype.type if slope.dtype.kind == 'f' else np.float64 ftype = int_scinter_ftype(arr.dtype, slope, inter, default) slope = slope.astype(ftype) inter = inter.astype(ftype) @@ -901,7 +904,7 @@ def apply_read_scaling(arr, slope=None, inter=None): def working_type(in_type, slope=1.0, inter=0.0): - """ Return array type from applying `slope`, `inter` to array of `in_type` + """Return array type from applying `slope`, `inter` to array of `in_type` Numpy type that results from an array of type `in_type` being combined with `slope` and `inter`. 
It returns something like the dtype type of @@ -944,7 +947,7 @@ def working_type(in_type, slope=1.0, inter=0.0): def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32): - """ float type containing int type `ifmt` * `slope` + `inter` + """float type containing int type `ifmt` * `slope` + `inter` Return float type that can represent the max and the min of the `ifmt` type after multiplication with `slope` and addition of `inter` with something @@ -996,7 +999,7 @@ def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32): def best_write_scale_ftype(arr, slope=1.0, inter=0.0, default=np.float32): - """ Smallest float type to contain range of ``arr`` after scaling + """Smallest float type to contain range of ``arr`` after scaling Scaling that will be applied to ``arr`` is ``(arr - inter) / slope``. @@ -1060,7 +1063,7 @@ def best_write_scale_ftype(arr, slope=1.0, inter=0.0, default=np.float32): def better_float_of(first, second, default=np.float32): - """ Return more capable float type of `first` and `second` + """Return more capable float type of `first` and `second` Return `default` if neither of `first` or `second` is a float @@ -1105,10 +1108,8 @@ def better_float_of(first, second, default=np.float32): return second.type -def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', - default=np.float32): - """ Smallest float type for scaling of `tst_arr` that does not overflow - """ +def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', default=np.float32): + """Smallest float type for scaling of `tst_arr` that does not overflow""" assert direction in ('read', 'write') if default not in OK_FLOATS and default is np.longdouble: # Omitted longdouble @@ -1146,7 +1147,7 @@ def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', def finite_range(arr, check_nan=False): - """ Get range (min, max) or range and flag (min, max, has_nan) from `arr` + """Get range (min, max) or range and flag (min, max, has_nan) from `arr` Parameters ---------- @@ -1242,7 +1243,7 @@ def finite_range(arr, check_nan=False): def shape_zoom_affine(shape, zooms, x_flip=True): - """ Get affine implied by given shape and zooms + """Get affine implied by given shape and zooms We get the translations from the center of the image (implied by `shape`). @@ -1304,7 +1305,7 @@ def shape_zoom_affine(shape, zooms, x_flip=True): def rec2dict(rec): - """ Convert recarray to dictionary + """Convert recarray to dictionary Also converts scalar values to scalars @@ -1337,7 +1338,7 @@ def rec2dict(rec): def fname_ext_ul_case(fname): - """ `fname` with ext changed to upper / lower case if file exists + """`fname` with ext changed to upper / lower case if file exists Check for existence of `fname`. If it does exist, return unmodified. If it doesn't, check for existence of `fname` with case changed from lower to diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index b933892565..cdc2957dab 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Class to wrap numpy structured array +"""Class to wrap numpy structured array ============ wrapstruct @@ -111,8 +111,7 @@ """ import numpy as np -from .volumeutils import (pretty_mapping, endian_codes, native_code, - swapped_code) +from .volumeutils import pretty_mapping, endian_codes, native_code, swapped_code from . 
import imageglobals as imageglobals from .batteryrunners import BatteryRunner @@ -125,11 +124,8 @@ class WrapStruct: # placeholder datatype template_dtype = np.dtype([('integer', 'i2')]) - def __init__(self, - binaryblock=None, - endianness=None, - check=True): - """ Initialize WrapStruct from binary data block + def __init__(self, binaryblock=None, endianness=None, check=True): + """Initialize WrapStruct from binary data block Parameters ---------- @@ -160,8 +156,7 @@ def __init__(self, # check size if len(binaryblock) != self.template_dtype.itemsize: raise WrapStructError('Binary block is wrong size') - wstr = np.ndarray(shape=(), dtype=self.template_dtype, - buffer=binaryblock) + wstr = np.ndarray(shape=(), dtype=self.template_dtype, buffer=binaryblock) if endianness is None: endianness = self.__class__.guessed_endian(wstr) else: @@ -175,7 +170,7 @@ def __init__(self, @classmethod def from_fileobj(klass, fileobj, endianness=None, check=True): - """ Return read structure with given or guessed endiancode + """Return read structure with given or guessed endiancode Parameters ---------- @@ -194,7 +189,7 @@ def from_fileobj(klass, fileobj, endianness=None, check=True): @property def binaryblock(self): - """ binary block of data as string + """binary block of data as string Returns ------- @@ -211,7 +206,7 @@ def binaryblock(self): return self._structarr.tobytes() def write_to(self, fileobj): - """ Write structure to fileobj + """Write structure to fileobj Write starts at fileobj current file position. @@ -237,7 +232,7 @@ def write_to(self, fileobj): @property def endianness(self): - """ endian code of binary data + """endian code of binary data The endianness code gives the current byte order interpretation of the binary data. @@ -261,7 +256,7 @@ def endianness(self): return swapped_code def copy(self): - """ Return copy of structure + """Return copy of structure >>> wstr = WrapStruct() >>> wstr['integer'] = 3 @@ -274,7 +269,7 @@ def copy(self): return self.__class__(self.binaryblock, self.endianness, check=False) def __eq__(self, other): - """ equality between two structures defined by binaryblock + """equality between two structures defined by binaryblock Examples -------- @@ -302,7 +297,7 @@ def __ne__(self, other): return not self == other def __getitem__(self, item): - """ Return values from structure data + """Return values from structure data Examples -------- @@ -313,7 +308,7 @@ def __getitem__(self, item): return self._structarr[item] def __setitem__(self, item, value): - """ Set values in structured data + """Set values in structured data Examples -------- @@ -328,24 +323,24 @@ def __iter__(self): return iter(self.keys()) def keys(self): - """ Return keys from structured data""" + """Return keys from structured data""" return list(self.template_dtype.names) def values(self): - """ Return values from structured data""" + """Return values from structured data""" data = self._structarr return [data[key] for key in self.template_dtype.names] def items(self): - """ Return items from structured data""" + """Return items from structured data""" return zip(self.keys(), self.values()) def get(self, k, d=None): - """ Return value for the key k if present or d otherwise""" + """Return value for the key k if present or d otherwise""" return self._structarr[k] if k in self.keys() else d def check_fix(self, logger=None, error_level=None): - """ Check structured data with checks + """Check structured data with checks Parameters ---------- @@ -365,16 +360,15 @@ def check_fix(self, logger=None, 
error_level=None): @classmethod def diagnose_binaryblock(klass, binaryblock, endianness=None): - """ Run checks over binary data, return string """ + """Run checks over binary data, return string""" wstr = klass(binaryblock, endianness=endianness, check=False) battrun = BatteryRunner(klass._get_checks()) reports = battrun.check_only(wstr) - return '\n'.join([report.message - for report in reports if report.message]) + return '\n'.join([report.message for report in reports if report.message]) @classmethod def guessed_endian(self, mapping): - """ Guess intended endianness from mapping-like ``mapping`` + """Guess intended endianness from mapping-like ``mapping`` Parameters ---------- @@ -391,8 +385,7 @@ def guessed_endian(self, mapping): @classmethod def default_structarr(klass, endianness=None): - """ Return structured array for default structure with given endianness - """ + """Return structured array for default structure with given endianness""" dt = klass.template_dtype if endianness is not None: endianness = endian_codes[endianness] @@ -401,7 +394,7 @@ def default_structarr(klass, endianness=None): @property def structarr(self): - """ Structured data, with data fields + """Structured data, with data fields Examples -------- @@ -415,12 +408,12 @@ def structarr(self): return self._structarr def __str__(self): - """ Return string representation for printing """ + """Return string representation for printing""" summary = f"{self.__class__} object, endian='{self.endianness}'" return '\n'.join([summary, pretty_mapping(self)]) def as_byteswapped(self, endianness=None): - """ return new byteswapped object with given ``endianness`` + """return new byteswapped object with given ``endianness`` Guaranteed to make a copy even if endianness is the same as the current endianness. @@ -482,17 +475,17 @@ def as_byteswapped(self, endianness=None): @classmethod def _get_checks(klass): - """ Return sequence of check functions for this class """ + """Return sequence of check functions for this class""" return () class LabeledWrapStruct(WrapStruct): - """ A WrapStruct with some fields having value labels for printing etc - """ + """A WrapStruct with some fields having value labels for printing etc""" + _field_recoders = {} # for recoding values for str def get_value_label(self, fieldname): - """ Returns label for coded field + """Returns label for coded field A coded field is an int field containing codes that stand for discrete values that also have string labels. @@ -535,7 +528,7 @@ def get_value_label(self, fieldname): return f'' def __str__(self): - """ Return string representation for printing """ + """Return string representation for printing""" summary = f"{self.__class__} object, endian='{self.endianness}'" def _getter(obj, key): diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index d907f95e10..67e10cd152 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -18,25 +18,25 @@ class XmlSerializable: - """ Basic interface for serializing an object to xml""" + """Basic interface for serializing an object to xml""" def _to_xml_element(self): - """ Output should be a xml.etree.ElementTree.Element""" + """Output should be a xml.etree.ElementTree.Element""" raise NotImplementedError() def to_xml(self, enc='utf-8'): - """ Output should be an xml string with the given encoding. + """Output should be an xml string with the given encoding. 
(default: utf-8)""" ele = self._to_xml_element() return '' if ele is None else tostring(ele, enc) class XmlBasedHeader(FileBasedHeader, XmlSerializable): - """ Basic wrapper around FileBasedHeader and XmlSerializable.""" + """Basic wrapper around FileBasedHeader and XmlSerializable.""" class XmlParser: - """ Base class for defining how to parse xml-based image snippets. + """Base class for defining how to parse xml-based image snippets. Image-specific parsers should define: StartElementHandler @@ -44,9 +44,7 @@ class XmlParser: CharacterDataHandler """ - HANDLER_NAMES = ['StartElementHandler', - 'EndElementHandler', - 'CharacterDataHandler'] + HANDLER_NAMES = ['StartElementHandler', 'EndElementHandler', 'CharacterDataHandler'] def __init__(self, encoding='utf-8', buffer_size=35000000, verbose=0): """ From 5d0481d33a5691bf82538dff1da541d2f29a0e14 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 21:55:26 -0500 Subject: [PATCH 131/702] STY: Reorder imports and guard against oversorting --- nibabel/__init__.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index ad14fc52dc..a816937dd2 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -43,6 +43,11 @@ from . import spm2analyze as spm2 from . import nifti1 as ni1 from . import ecat +from . import mriutils +from . import streamlines +from . import viewers + +# isort: split # object imports from .fileholders import FileHolder, FileHolderError @@ -67,9 +72,8 @@ aff2axcodes, ) from .imageclasses import all_image_classes -from . import mriutils -from . import streamlines -from . import viewers + +# isort: split from .pkg_info import get_pkg_info as _get_pkg_info From 0ab2856cac4d4baae7ab3e2f6d58421db55d807f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 21:32:00 -0500 Subject: [PATCH 132/702] STY: isort [git-blame-ignore-rev] --- nibabel/__init__.py | 40 ++++++----- nibabel/affines.py | 4 +- nibabel/analyze.py | 20 +++--- nibabel/arrayproxy.py | 9 ++- nibabel/arraywriters.py | 10 +-- nibabel/benchmarks/bench_array_to_file.py | 5 +- .../benchmarks/bench_arrayproxy_slicing.py | 9 +-- nibabel/benchmarks/bench_fileslice.py | 6 +- nibabel/benchmarks/bench_finite_range.py | 6 +- nibabel/benchmarks/bench_load_save.py | 7 +- nibabel/brikhead.py | 4 +- nibabel/casting.py | 2 +- nibabel/cifti2/__init__.py | 18 ++--- nibabel/cifti2/cifti2.py | 12 ++-- nibabel/cifti2/cifti2_axes.py | 6 +- nibabel/cifti2/parse_cifti2.py | 26 ++++---- nibabel/cifti2/tests/test_axes.py | 6 +- nibabel/cifti2/tests/test_cifti2.py | 8 +-- nibabel/cifti2/tests/test_cifti2io_axes.py | 10 +-- nibabel/cifti2/tests/test_cifti2io_header.py | 13 ++-- nibabel/cifti2/tests/test_new_cifti2.py | 4 +- nibabel/cmdline/convert.py | 2 +- nibabel/cmdline/dicomfs.py | 12 ++-- nibabel/cmdline/diff.py | 6 +- nibabel/cmdline/ls.py | 4 +- nibabel/cmdline/parrec2nii.py | 20 +++--- nibabel/cmdline/roi.py | 5 +- nibabel/cmdline/stats.py | 3 +- nibabel/cmdline/tck2trk.py | 5 +- nibabel/cmdline/tests/test_conform.py | 2 +- nibabel/cmdline/tests/test_convert.py | 5 +- nibabel/cmdline/tests/test_parrec2nii.py | 9 +-- nibabel/cmdline/tests/test_roi.py | 11 ++-- nibabel/cmdline/tests/test_stats.py | 7 +- nibabel/cmdline/tests/test_utils.py | 11 ++-- nibabel/cmdline/trk2tck.py | 2 +- nibabel/data.py | 10 +-- nibabel/dataobj_images.py | 2 +- nibabel/deprecator.py | 2 +- nibabel/dft.py | 12 ++-- nibabel/ecat.py | 6 +- nibabel/eulerangles.py | 2 - nibabel/filebasedimages.py | 3 +- 
nibabel/fileslice.py | 6 +- nibabel/freesurfer/__init__.py | 10 +-- nibabel/freesurfer/io.py | 8 +-- nibabel/freesurfer/mghformat.py | 13 ++-- nibabel/freesurfer/tests/test_io.py | 29 ++++---- nibabel/freesurfer/tests/test_mghformat.py | 25 +++---- nibabel/funcs.py | 2 +- nibabel/gifti/__init__.py | 8 +-- nibabel/gifti/gifti.py | 11 ++-- nibabel/gifti/parse_gifti_fast.py | 12 ++-- nibabel/gifti/tests/test_gifti.py | 19 +++--- nibabel/gifti/tests/test_parse_gifti_fast.py | 20 +++--- nibabel/imageclasses.py | 6 +- nibabel/imagestats.py | 1 + nibabel/loadsave.py | 9 +-- nibabel/minc1.py | 3 +- nibabel/minc2.py | 2 +- nibabel/nicom/ascconv.py | 3 +- nibabel/nicom/dicomreaders.py | 2 +- nibabel/nicom/dicomwrappers.py | 5 +- nibabel/nicom/tests/__init__.py | 1 + nibabel/nicom/tests/test_ascconv.py | 6 +- nibabel/nicom/tests/test_csareader.py | 11 ++-- nibabel/nicom/tests/test_dicomreaders.py | 8 +-- nibabel/nicom/tests/test_dicomwrappers.py | 21 +++--- nibabel/nicom/tests/test_dwiparams.py | 7 +- nibabel/nicom/tests/test_structreader.py | 2 +- nibabel/nicom/tests/test_utils.py | 3 +- nibabel/nifti1.py | 12 ++-- nibabel/nifti2.py | 2 +- nibabel/openers.py | 3 +- nibabel/optpkg.py | 1 + nibabel/parrec.py | 15 +++-- nibabel/pkg_info.py | 2 + nibabel/processing.py | 6 +- nibabel/pydicom_compat.py | 5 +- nibabel/quaternions.py | 1 + nibabel/spatialimages.py | 6 +- nibabel/spm99analyze.py | 7 +- nibabel/streamlines/__init__.py | 7 +- nibabel/streamlines/array_sequence.py | 2 +- nibabel/streamlines/tck.py | 6 +- .../streamlines/tests/test_array_sequence.py | 11 ++-- nibabel/streamlines/tests/test_streamlines.py | 21 +++--- nibabel/streamlines/tests/test_tck.py | 19 +++--- nibabel/streamlines/tests/test_tractogram.py | 23 ++++--- .../streamlines/tests/test_tractogram_file.py | 4 +- nibabel/streamlines/tests/test_trk.py | 18 +++-- nibabel/streamlines/tests/test_utils.py | 8 +-- nibabel/streamlines/tractogram.py | 5 +- nibabel/streamlines/trk.py | 11 ++-- nibabel/testing/__init__.py | 18 +++-- nibabel/tests/data/check_parrec_reslice.py | 1 + nibabel/tests/data/gen_standard.py | 2 +- nibabel/tests/data/make_moved_anat.py | 2 +- nibabel/tests/nibabel_data.py | 7 +- nibabel/tests/scriptrunner.py | 10 +-- nibabel/tests/test_affines.py | 16 ++--- nibabel/tests/test_analyze.py | 33 +++++----- nibabel/tests/test_api_validators.py | 1 + nibabel/tests/test_arrayproxy.py | 24 +++---- nibabel/tests/test_arraywriters.py | 23 ++++--- nibabel/tests/test_batteryrunners.py | 4 +- nibabel/tests/test_brikhead.py | 7 +- nibabel/tests/test_casting.py | 22 +++---- nibabel/tests/test_data.py | 26 ++++---- nibabel/tests/test_dataobj_images.py | 5 +- nibabel/tests/test_deprecated.py | 5 +- nibabel/tests/test_deprecator.py | 10 +-- nibabel/tests/test_dft.py | 10 ++- nibabel/tests/test_diff.py | 7 +- nibabel/tests/test_ecat.py | 16 ++--- nibabel/tests/test_ecat_data.py | 5 +- nibabel/tests/test_environment.py | 6 +- nibabel/tests/test_euler.py | 6 +- nibabel/tests/test_filebasedimages.py | 3 +- nibabel/tests/test_filehandles.py | 4 +- nibabel/tests/test_fileholders.py | 1 - nibabel/tests/test_filename_parser.py | 4 +- nibabel/tests/test_files_interface.py | 10 +-- nibabel/tests/test_fileslice.py | 29 ++++---- nibabel/tests/test_fileutils.py | 3 +- nibabel/tests/test_floating.py | 22 +++---- nibabel/tests/test_funcs.py | 10 ++- nibabel/tests/test_image_api.py | 56 ++++++++-------- nibabel/tests/test_image_load_save.py | 39 ++++++----- nibabel/tests/test_image_types.py | 18 ++--- nibabel/tests/test_imageclasses.py | 10 ++- 
nibabel/tests/test_imagestats.py | 3 +- nibabel/tests/test_init.py | 8 ++- nibabel/tests/test_loadsave.py | 23 ++++--- nibabel/tests/test_minc1.py | 22 +++---- nibabel/tests/test_minc2.py | 2 - nibabel/tests/test_minc2_data.py | 8 +-- nibabel/tests/test_mriutils.py | 4 +- nibabel/tests/test_nibabel_data.py | 5 +- nibabel/tests/test_nifti1.py | 40 +++++------ nibabel/tests/test_nifti2.py | 11 ++-- nibabel/tests/test_onetime.py | 1 + nibabel/tests/test_openers.py | 26 +++----- nibabel/tests/test_optpkg.py | 9 ++- nibabel/tests/test_orientations.py | 21 +++--- nibabel/tests/test_parrec.py | 31 ++++----- nibabel/tests/test_parrec_data.py | 14 ++-- nibabel/tests/test_pkg_info.py | 4 +- nibabel/tests/test_processing.py | 31 ++++----- nibabel/tests/test_proxy_api.py | 37 +++++------ nibabel/tests/test_quaternions.py | 6 +- nibabel/tests/test_recoder.py | 5 +- nibabel/tests/test_removalschedule.py | 4 +- nibabel/tests/test_round_trip.py | 12 ++-- nibabel/tests/test_rstutils.py | 3 +- nibabel/tests/test_scaling.py | 14 ++-- nibabel/tests/test_scripts.py | 29 ++++---- nibabel/tests/test_spaces.py | 9 ++- nibabel/tests/test_spatialimages.py | 17 ++--- nibabel/tests/test_spm2analyze.py | 7 +- nibabel/tests/test_spm99analyze.py | 21 +++--- nibabel/tests/test_testing.py | 14 ++-- nibabel/tests/test_tmpdirs.py | 3 +- nibabel/tests/test_tripwire.py | 4 +- nibabel/tests/test_viewers.py | 8 +-- nibabel/tests/test_volumeutils.py | 66 +++++++++---------- nibabel/tests/test_wrapstruct.py | 17 ++--- nibabel/tmpdirs.py | 2 +- nibabel/viewers.py | 3 +- nibabel/volumeutils.py | 10 +-- nibabel/wrapstruct.py | 2 +- 171 files changed, 874 insertions(+), 971 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index a816937dd2..4311e3d7bf 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -9,8 +9,8 @@ import os -from .pkg_info import __version__ from .info import long_description as __doc__ +from .pkg_info import __version__ __doc__ += """ Quickstart @@ -39,39 +39,37 @@ # module imports from . import analyze as ana -from . import spm99analyze as spm99 -from . import spm2analyze as spm2 +from . import ecat, mriutils from . import nifti1 as ni1 -from . import ecat -from . import mriutils -from . import streamlines -from . import viewers +from . import spm2analyze as spm2 +from . import spm99analyze as spm99 +from . 
import streamlines, viewers # isort: split # object imports +from .analyze import AnalyzeHeader, AnalyzeImage +from .arrayproxy import is_proxy +from .cifti2 import Cifti2Header, Cifti2Image from .fileholders import FileHolder, FileHolderError +from .freesurfer import MGHImage +from .funcs import as_closest_canonical, concat_images, four_to_three, squeeze_image +from .gifti import GiftiImage +from .imageclasses import all_image_classes from .loadsave import load, save -from .arrayproxy import is_proxy -from .analyze import AnalyzeHeader, AnalyzeImage -from .spm99analyze import Spm99AnalyzeHeader, Spm99AnalyzeImage -from .spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage -from .nifti1 import Nifti1Header, Nifti1Image, Nifti1Pair -from .nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair from .minc1 import Minc1Image from .minc2 import Minc2Image -from .cifti2 import Cifti2Header, Cifti2Image -from .gifti import GiftiImage -from .freesurfer import MGHImage -from .funcs import squeeze_image, concat_images, four_to_three, as_closest_canonical +from .nifti1 import Nifti1Header, Nifti1Image, Nifti1Pair +from .nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair from .orientations import ( - io_orientation, - flip_axis, OrientationError, - apply_orientation, aff2axcodes, + apply_orientation, + flip_axis, + io_orientation, ) -from .imageclasses import all_image_classes +from .spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage +from .spm99analyze import Spm99AnalyzeHeader, Spm99AnalyzeImage # isort: split diff --git a/nibabel/affines.py b/nibabel/affines.py index c8bc586aa7..59b52e768e 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -2,10 +2,10 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Utility routines for working with points and affine transforms """ -import numpy as np - from functools import reduce +import numpy as np + class AffineError(ValueError): """Errors in calculating or using affines""" diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 648c75d68a..4a76350d59 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -84,21 +84,21 @@ import numpy as np +from .arrayproxy import ArrayProxy +from .arraywriters import ArrayWriter, WriterError, get_slope_inter, make_array_writer +from .batteryrunners import Report +from .fileholders import copy_file_map +from .spatialimages import HeaderDataError, HeaderTypeError, SpatialImage from .volumeutils import ( - native_code, - swapped_code, - make_dt_codes, - shape_zoom_affine, + apply_read_scaling, array_from_file, + make_dt_codes, + native_code, seek_tell, - apply_read_scaling, + shape_zoom_affine, + swapped_code, ) -from .arraywriters import make_array_writer, get_slope_inter, WriterError, ArrayWriter from .wrapstruct import LabeledWrapStruct -from .spatialimages import HeaderDataError, HeaderTypeError, SpatialImage -from .fileholders import copy_file_map -from .batteryrunners import Report -from .arrayproxy import ArrayProxy # Sub-parts of standard analyze header from # Mayo dbh.h file diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index bb97b8efb0..5a2bae02c0 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -25,16 +25,15 @@ See :mod:`nibabel.tests.test_proxy_api` for proxy API conformance checks. """ +import warnings from contextlib import contextmanager from threading import RLock -import warnings import numpy as np -from .volumeutils import array_from_file, apply_read_scaling -from .fileslice import fileslice, canonical_slicers from . 
import openers - +from .fileslice import canonical_slicers, fileslice +from .volumeutils import apply_read_scaling, array_from_file """This flag controls whether a new file handle is created every time an image is accessed through an ``ArrayProxy``, or a single file handle is created and @@ -413,8 +412,8 @@ def reshape(self, shape): size = np.prod(self._shape) # Calculate new shape if not fully specified - from operator import mul from functools import reduce + from operator import mul n_unknowns = len([e for e in shape if e == -1]) if n_unknowns > 1: diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 1a80bcfa98..59e55b314c 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -32,15 +32,15 @@ def __init__(self, array, out_dtype=None) import numpy as np from .casting import ( - int_to_float, as_int, - int_abs, - type_info, - floor_exact, best_float, + floor_exact, + int_abs, + int_to_float, shared_range, + type_info, ) -from .volumeutils import finite_range, array_to_file +from .volumeutils import array_to_file, finite_range class WriterError(Exception): diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index 7b59fbcaec..c2bab7e95e 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -14,13 +14,12 @@ from io import BytesIO # NOQA import numpy as np - -from .butils import print_git_title - from numpy.testing import measure from nibabel.volumeutils import array_to_file # NOQA +from .butils import print_git_title + def bench_array_to_file(): rng = np.random.RandomState(20111001) diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 71ea801756..d313a7db5e 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -10,18 +10,19 @@ pytest -c /benchmarks/pytest.benchmark.ini /benchmarks/bench_arrayproxy_slicing.py """ -from timeit import timeit import gc import itertools as it -import numpy as np +from timeit import timeit from unittest import mock +import numpy as np + import nibabel as nib -from nibabel.tmpdirs import InTemporaryDirectory from nibabel.openers import HAVE_INDEXED_GZIP +from nibabel.tmpdirs import InTemporaryDirectory -from .butils import print_git_title from ..rstutils import rst_table +from .butils import print_git_title # if memory_profiler is installed, we get memory usage results try: diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py index 59b6aa9314..cc3d837c2d 100644 --- a/nibabel/benchmarks/bench_fileslice.py +++ b/nibabel/benchmarks/bench_fileslice.py @@ -9,16 +9,16 @@ """ import sys +from io import BytesIO from timeit import timeit import numpy as np -from io import BytesIO -from ..openers import ImageOpener from ..fileslice import fileslice +from ..openers import ImageOpener +from ..optpkg import optional_package from ..rstutils import rst_table from ..tmpdirs import InTemporaryDirectory -from ..optpkg import optional_package SHAPE = (64, 64, 32, 100) ROW_NAMES = [f'axis {i}, len {dim}' for i, dim in enumerate(SHAPE)] diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py index 0a6ff576fa..edd839ce61 100644 --- a/nibabel/benchmarks/bench_finite_range.py +++ b/nibabel/benchmarks/bench_finite_range.py @@ -13,14 +13,12 @@ import sys import numpy as np - - -from .butils import print_git_title - from numpy.testing import measure from 
nibabel.volumeutils import finite_range # NOQA +from .butils import print_git_title + def bench_finite_range(): rng = np.random.RandomState(20111001) diff --git a/nibabel/benchmarks/bench_load_save.py b/nibabel/benchmarks/bench_load_save.py index d9c6461959..007753ce51 100644 --- a/nibabel/benchmarks/bench_load_save.py +++ b/nibabel/benchmarks/bench_load_save.py @@ -11,17 +11,14 @@ """ import sys +from io import BytesIO import numpy as np - -from io import BytesIO +from numpy.testing import measure from .. import Nifti1Image - from .butils import print_git_title -from numpy.testing import measure - def bench_load_save(): rng = np.random.RandomState(20111001) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 4a330893b3..0559671217 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -28,15 +28,15 @@ programs (e.g., example4d+orig'[0]'). """ -from copy import deepcopy import os import re +from copy import deepcopy import numpy as np from .arrayproxy import ArrayProxy from .fileslice import strided_scalar -from .spatialimages import SpatialImage, SpatialHeader, HeaderDataError, ImageDataError +from .spatialimages import HeaderDataError, ImageDataError, SpatialHeader, SpatialImage from .volumeutils import Recoder # used for doc-tests diff --git a/nibabel/casting.py b/nibabel/casting.py index c2bceeaf0f..ce58915fe9 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -6,7 +6,7 @@ import warnings from numbers import Integral -from platform import processor, machine +from platform import machine, processor import numpy as np diff --git a/nibabel/cifti2/__init__.py b/nibabel/cifti2/__init__.py index e7c999b6cd..9c6805f818 100644 --- a/nibabel/cifti2/__init__.py +++ b/nibabel/cifti2/__init__.py @@ -17,28 +17,28 @@ cifti2_axes """ -from .parse_cifti2 import Cifti2Extension from .cifti2 import ( - Cifti2MetaData, + CIFTI_BRAIN_STRUCTURES, + CIFTI_MODEL_TYPES, + Cifti2BrainModel, Cifti2Header, + Cifti2HeaderError, Cifti2Image, Cifti2Label, Cifti2LabelTable, - Cifti2VertexIndices, - Cifti2VoxelIndicesIJK, - Cifti2BrainModel, Cifti2Matrix, Cifti2MatrixIndicesMap, + Cifti2MetaData, Cifti2NamedMap, Cifti2Parcel, Cifti2Surface, Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + Cifti2VertexIndices, Cifti2Vertices, Cifti2Volume, - CIFTI_BRAIN_STRUCTURES, - Cifti2HeaderError, - CIFTI_MODEL_TYPES, + Cifti2VoxelIndicesIJK, load, save, ) -from .cifti2_axes import Axis, BrainModelAxis, ParcelsAxis, SeriesAxis, LabelAxis, ScalarAxis +from .cifti2_axes import Axis, BrainModelAxis, LabelAxis, ParcelsAxis, ScalarAxis, SeriesAxis +from .parse_cifti2 import Cifti2Extension diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 4b6fd3df25..497b796dca 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -17,19 +17,19 @@ http://www.nitrc.org/projects/cifti """ import re -from collections.abc import MutableSequence, MutableMapping, Iterable from collections import OrderedDict +from collections.abc import Iterable, MutableMapping, MutableSequence from warnings import warn import numpy as np from .. 
import xmlutils as xml -from ..filebasedimages import FileBasedHeader, SerializableImage -from ..dataobj_images import DataobjImage -from ..nifti1 import Nifti1Extensions -from ..nifti2 import Nifti2Image, Nifti2Header from ..arrayproxy import reshape_dataobj from ..caret import CaretMetaData +from ..dataobj_images import DataobjImage +from ..filebasedimages import FileBasedHeader, SerializableImage +from ..nifti1 import Nifti1Extensions +from ..nifti2 import Nifti2Header, Nifti2Image from ..volumeutils import make_dt_codes @@ -1473,7 +1473,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): img : Cifti2Image Returns a Cifti2Image """ - from .parse_cifti2 import _Cifti2AsNiftiImage, Cifti2Extension + from .parse_cifti2 import Cifti2Extension, _Cifti2AsNiftiImage nifti_img = _Cifti2AsNiftiImage.from_file_map( file_map, mmap=mmap, keep_file_open=keep_file_open diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 31e4ab55ab..3d88fca1e3 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -118,10 +118,12 @@ ... bm_cortex))) """ +import abc +from operator import xor + import numpy as np + from . import cifti2 -from operator import xor -import abc def from_index_mapping(mim): diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index 36db0fa290..550d8e30bd 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -7,37 +7,37 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -import numpy as np from io import BytesIO +import numpy as np from packaging.version import Version, parse +from .. import xmlutils as xml +from ..batteryrunners import Report +from ..nifti1 import Nifti1Extension, extension_codes, intent_codes +from ..nifti2 import Nifti2Header, Nifti2Image +from ..spatialimages import HeaderDataError from .cifti2 import ( - Cifti2MetaData, + CIFTI_BRAIN_STRUCTURES, + CIFTI_MODEL_TYPES, + Cifti2BrainModel, Cifti2Header, + Cifti2HeaderError, Cifti2Label, Cifti2LabelTable, - Cifti2VertexIndices, - Cifti2VoxelIndicesIJK, - Cifti2BrainModel, Cifti2Matrix, Cifti2MatrixIndicesMap, + Cifti2MetaData, Cifti2NamedMap, Cifti2Parcel, Cifti2Surface, Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + Cifti2VertexIndices, Cifti2Vertices, Cifti2Volume, - CIFTI_BRAIN_STRUCTURES, - CIFTI_MODEL_TYPES, + Cifti2VoxelIndicesIJK, _underscore, - Cifti2HeaderError, ) -from .. 
import xmlutils as xml -from ..spatialimages import HeaderDataError -from ..batteryrunners import Report -from ..nifti1 import Nifti1Extension, extension_codes, intent_codes -from ..nifti2 import Nifti2Header, Nifti2Image class Cifti2Extension(Nifti1Extension): diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index ecb6be272b..b8940433af 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -1,9 +1,11 @@ +from copy import deepcopy + import numpy as np import pytest -from .test_cifti2io_axes import check_rewrite + import nibabel.cifti2.cifti2_axes as axes -from copy import deepcopy +from .test_cifti2io_axes import check_rewrite rand_affine = np.random.randn(4, 4) vol_shape = (5, 10, 3) diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index be10f8b0e0..98d97e34e2 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -4,15 +4,13 @@ from xml.etree import ElementTree import numpy as np +import pytest from nibabel import cifti2 as ci +from nibabel.cifti2.cifti2 import Cifti2HeaderError, _float_01, _value_if_klass from nibabel.nifti2 import Nifti2Header -from nibabel.cifti2.cifti2 import _float_01, _value_if_klass, Cifti2HeaderError - -import pytest - from nibabel.tests.test_dataobj_images import TestDataobjAPI as _TDA -from nibabel.tests.test_image_api import SerializeMixin, DtypeOverrideMixin +from nibabel.tests.test_image_api import DtypeOverrideMixin, SerializeMixin def compare_xml_leaf(str1, str2): diff --git a/nibabel/cifti2/tests/test_cifti2io_axes.py b/nibabel/cifti2/tests/test_cifti2io_axes.py index 756b0f6c9f..2f5e781e44 100644 --- a/nibabel/cifti2/tests/test_cifti2io_axes.py +++ b/nibabel/cifti2/tests/test_cifti2io_axes.py @@ -1,10 +1,12 @@ -from nibabel.cifti2 import cifti2_axes, cifti2 -from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data -import nibabel as nib import os -import numpy as np import tempfile +import numpy as np + +import nibabel as nib +from nibabel.cifti2 import cifti2, cifti2_axes +from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data + test_directory = os.path.join(get_nibabel_data(), 'nitest-cifti2') hcp_labels = [ diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 3497ec413f..7315a0d1f2 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -7,21 +7,20 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from os.path import join as pjoin, dirname import io +from os.path import dirname +from os.path import join as pjoin +import pytest +from numpy.testing import assert_array_almost_equal from packaging.version import Version import nibabel as nib from nibabel import cifti2 as ci from nibabel.cifti2.parse_cifti2 import _Cifti2AsNiftiHeader - -from nibabel.tmpdirs import InTemporaryDirectory -from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data from nibabel.tests import test_nifti2 as tn2 - -from numpy.testing import assert_array_almost_equal -import pytest +from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data +from nibabel.tmpdirs import InTemporaryDirectory NIBABEL_TEST_DATA = pjoin(dirname(nib.__file__), 'tests', 'data') NIFTI2_DATA = pjoin(NIBABEL_TEST_DATA, 'example_nifti2.nii.gz') diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index 
15c6c110b9..84f1376f1f 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -7,17 +7,17 @@ scratch. """ import numpy as np +import pytest import nibabel as nib from nibabel import cifti2 as ci from nibabel.tmpdirs import InTemporaryDirectory -import pytest from ...testing import ( + assert_array_equal, clear_and_catch_warnings, error_warnings, suppress_warnings, - assert_array_equal, ) affine = [[-1.5, 0, 0, 90], [0, 1.5, 0, -85], [0, 0, 1.5, -71], [0, 0, 0, 1.0]] diff --git a/nibabel/cmdline/convert.py b/nibabel/cmdline/convert.py index ce80d8c709..c0bc8f212e 100644 --- a/nibabel/cmdline/convert.py +++ b/nibabel/cmdline/convert.py @@ -12,8 +12,8 @@ """ import argparse -from pathlib import Path import warnings +from pathlib import Path import nibabel as nib diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index efba4809c7..8de1438544 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -9,13 +9,13 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Copyright (C) 2011 Christian Haselgrove -import sys -import os -import stat import errno -import time import locale import logging +import os +import stat +import sys +import time class dummy_fuse: @@ -32,11 +32,11 @@ class dummy_fuse: except ImportError: fuse = dummy_fuse +from optparse import Option, OptionParser + import nibabel as nib import nibabel.dft as dft -from optparse import OptionParser, Option - encoding = locale.getdefaultlocale()[1] fuse.fuse_python_api = (0, 2) diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 5ec5f425ee..5ca691ad64 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -16,17 +16,17 @@ with native endianness used in data files. 
""" +import hashlib +import os import re import sys from collections import OrderedDict -from optparse import OptionParser, Option +from optparse import Option, OptionParser import numpy as np import nibabel as nib import nibabel.cmdline.utils -import hashlib -import os def get_opt_parser(): diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 1bb9396bb3..c78c0910bf 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -12,13 +12,13 @@ """ import sys -from optparse import OptionParser, Option +from optparse import Option, OptionParser import numpy as np import nibabel as nib import nibabel.cmdline.utils -from nibabel.cmdline.utils import _err, verbose, table2string, ap, safe_get +from nibabel.cmdline.utils import _err, ap, safe_get, table2string, verbose __copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' 'and NiBabel contributors' __license__ = 'MIT' diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index f0d5b207f7..d6d3d6afe7 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -1,21 +1,23 @@ """Code for PAR/REC to NIfTI converter command """ -from optparse import OptionParser, Option +import csv +import os +import sys +from optparse import Option, OptionParser + import numpy as np import numpy.linalg as npl -import sys -import os -import csv + import nibabel -import nibabel.parrec as pr -from nibabel.parrec import one_line -from nibabel.mriutils import calculate_dwell_time, MRIError import nibabel.nifti1 as nifti1 +import nibabel.parrec as pr +from nibabel.affines import apply_affine, from_matvec, to_matvec from nibabel.filename_parser import splitext_addext +from nibabel.mriutils import MRIError, calculate_dwell_time +from nibabel.orientations import apply_orientation, inv_ornt_aff, io_orientation +from nibabel.parrec import one_line from nibabel.volumeutils import fname_ext_ul_case -from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation -from nibabel.affines import apply_affine, from_matvec, to_matvec def get_opt_parser(): diff --git a/nibabel/cmdline/roi.py b/nibabel/cmdline/roi.py index 690bb0b646..36f00a033a 100644 --- a/nibabel/cmdline/roi.py +++ b/nibabel/cmdline/roi.py @@ -1,6 +1,7 @@ -import sys -import os import argparse +import os +import sys + import nibabel as nb diff --git a/nibabel/cmdline/stats.py b/nibabel/cmdline/stats.py index 5c5d58f93c..0a6fc14aeb 100644 --- a/nibabel/cmdline/stats.py +++ b/nibabel/cmdline/stats.py @@ -12,8 +12,9 @@ """ import argparse + +from nibabel.imagestats import count_nonzero_voxels, mask_volume from nibabel.loadsave import load -from nibabel.imagestats import mask_volume, count_nonzero_voxels def _get_parser(): diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py index f50801c714..d5d29ba430 100644 --- a/nibabel/cmdline/tck2trk.py +++ b/nibabel/cmdline/tck2trk.py @@ -1,13 +1,12 @@ """ Convert tractograms (TCK -> TRK). 
""" -import os import argparse +import os import nibabel as nib - -from nibabel.streamlines import Field from nibabel.orientations import aff2axcodes +from nibabel.streamlines import Field def parse_args(): diff --git a/nibabel/cmdline/tests/test_conform.py b/nibabel/cmdline/tests/test_conform.py index 8e203b68f9..524e81fc79 100644 --- a/nibabel/cmdline/tests/test_conform.py +++ b/nibabel/cmdline/tests/test_conform.py @@ -13,9 +13,9 @@ import pytest import nibabel as nib -from nibabel.testing import test_data from nibabel.cmdline.conform import main from nibabel.optpkg import optional_package +from nibabel.testing import test_data _, have_scipy, _ = optional_package('scipy.ndimage') needs_scipy = unittest.skipUnless(have_scipy, 'These tests need scipy') diff --git a/nibabel/cmdline/tests/test_convert.py b/nibabel/cmdline/tests/test_convert.py index 00f00602af..411726a9ea 100644 --- a/nibabel/cmdline/tests/test_convert.py +++ b/nibabel/cmdline/tests/test_convert.py @@ -8,13 +8,12 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -import pytest - import numpy as np +import pytest import nibabel as nib -from nibabel.testing import test_data from nibabel.cmdline import convert +from nibabel.testing import test_data def test_convert_noop(tmp_path): diff --git a/nibabel/cmdline/tests/test_parrec2nii.py b/nibabel/cmdline/tests/test_parrec2nii.py index 2100f3f478..017df9813a 100644 --- a/nibabel/cmdline/tests/test_parrec2nii.py +++ b/nibabel/cmdline/tests/test_parrec2nii.py @@ -1,20 +1,17 @@ """Tests for the parrec2nii exe code """ -from os.path import join, isfile, basename +from os.path import basename, isfile, join +from unittest.mock import MagicMock, Mock, patch import numpy from numpy import array as npa +from numpy.testing import assert_almost_equal, assert_array_equal import nibabel from nibabel.cmdline import parrec2nii - -from unittest.mock import Mock, MagicMock, patch -from numpy.testing import assert_almost_equal, assert_array_equal - from nibabel.tests.test_parrec import EG_PAR, VARY_PAR from nibabel.tmpdirs import InTemporaryDirectory - AN_OLD_AFFINE = numpy.array( [ [-3.64994708, 0.0, 1.83564171, 123.66276611], diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index 6a1229f72e..ea3852b4da 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -1,13 +1,14 @@ import os -import numpy as np -import nibabel as nb -from nibabel.cmdline.roi import lossless_slice, parse_slice, main -from nibabel.testing import data_path - import unittest from unittest import mock + +import numpy as np import pytest +import nibabel as nb +from nibabel.cmdline.roi import lossless_slice, main, parse_slice +from nibabel.testing import data_path + def test_parse_slice(): assert parse_slice(None) == slice(None) diff --git a/nibabel/cmdline/tests/test_stats.py b/nibabel/cmdline/tests/test_stats.py index ced289cebb..576a408bce 100644 --- a/nibabel/cmdline/tests/test_stats.py +++ b/nibabel/cmdline/tests/test_stats.py @@ -8,13 +8,14 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from io import StringIO import sys +from io import StringIO + import numpy as np -from nibabel.loadsave import save -from nibabel.cmdline.stats import main from nibabel import Nifti1Image +from nibabel.cmdline.stats import main +from nibabel.loadsave import save def test_volume(tmpdir, capsys): diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 
58cab3ba42..5f531769a9 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -5,16 +5,17 @@ Test running scripts """ +from collections import OrderedDict +from io import StringIO +from os.path import join as pjoin + +import numpy as np import pytest import nibabel as nib -import numpy as np -from nibabel.cmdline.utils import * from nibabel.cmdline.diff import * -from os.path import join as pjoin +from nibabel.cmdline.utils import * from nibabel.testing import data_path -from collections import OrderedDict -from io import StringIO def test_table2string(): diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py index cc364af06d..6bfc2c8c3a 100644 --- a/nibabel/cmdline/trk2tck.py +++ b/nibabel/cmdline/trk2tck.py @@ -2,8 +2,8 @@ Convert tractograms (TRK -> TCK). """ -import os import argparse +import os import nibabel as nib diff --git a/nibabel/data.py b/nibabel/data.py index b29476a2d2..eaa6e77acf 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -3,15 +3,15 @@ """ Utilities to find files from NIPY data packages """ -import os -from os.path import join as pjoin +import configparser import glob +import os import sys -import configparser -from packaging.version import Version +from os.path import join as pjoin -from .environment import get_nipy_user_dir, get_nipy_system_dir +from packaging.version import Version +from .environment import get_nipy_system_dir, get_nipy_user_dir DEFAULT_INSTALL_HINT = ( 'If you have the package, have you set the ' 'path to the package correctly?' diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index f8df06157b..054bba5272 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -10,8 +10,8 @@ import numpy as np -from .filebasedimages import FileBasedImage from .deprecated import deprecate_with_version +from .filebasedimages import FileBasedImage class DataobjImage(FileBasedImage): diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 7b4ef5221f..251e10d64c 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -2,8 +2,8 @@ """ import functools -import warnings import re +import warnings _LEADING_WHITE = re.compile(r'^(\s*)') diff --git a/nibabel/dft.py b/nibabel/dft.py index fd944a2556..c805128951 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -12,20 +12,20 @@ import contextlib -import os -from os.path import join as pjoin -import tempfile import getpass import logging -import warnings +import os import sqlite3 +import tempfile +import warnings +from io import BytesIO +from os.path import join as pjoin import numpy -from io import BytesIO +from nibabel.optpkg import optional_package from .nifti1 import Nifti1Header -from nibabel.optpkg import optional_package pydicom = optional_package('pydicom')[0] diff --git a/nibabel/ecat.py b/nibabel/ecat.py index f72a81d5a4..03d3f26a74 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -48,11 +48,11 @@ import numpy as np -from .volumeutils import native_code, swapped_code, make_dt_codes, array_from_file -from .spatialimages import SpatialImage from .arraywriters import make_array_writer -from .wrapstruct import WrapStruct from .fileslice import canonical_slicers, predict_shape, slice2outax +from .spatialimages import SpatialImage +from .volumeutils import array_from_file, make_dt_codes, native_code, swapped_code +from .wrapstruct import WrapStruct BLOCK_SIZE = 512 diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index bb75b54b1e..b1d187e8c1 100644 --- a/nibabel/eulerangles.py +++ 
b/nibabel/eulerangles.py @@ -84,12 +84,10 @@ """ import math - from functools import reduce import numpy as np - _FLOAT_EPS_4 = np.finfo(float).eps * 4.0 diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index f74c7b56eb..eee822566b 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -11,8 +11,9 @@ import io from copy import deepcopy from urllib import request + from .fileholders import FileHolder -from .filename_parser import types_filenames, TypesFilenamesError, splitext_addext +from .filename_parser import TypesFilenamesError, splitext_addext, types_filenames from .openers import ImageOpener diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index 8df199d0d2..75da3ff85f 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -2,14 +2,12 @@ """ import operator -from numbers import Integral -from mmap import mmap - from functools import reduce +from mmap import mmap +from numbers import Integral import numpy as np - # Threshold for memory gap above which we always skip, to save memory # This value came from trying various values and looking at the timing with # ``bench_fileslice`` diff --git a/nibabel/freesurfer/__init__.py b/nibabel/freesurfer/__init__.py index 83c12f8682..806d19a272 100644 --- a/nibabel/freesurfer/__init__.py +++ b/nibabel/freesurfer/__init__.py @@ -2,12 +2,12 @@ """ from .io import ( - read_geometry, - read_morph_data, - write_morph_data, read_annot, + read_geometry, read_label, - write_geometry, + read_morph_data, write_annot, + write_geometry, + write_morph_data, ) -from .mghformat import load, save, MGHImage +from .mghformat import MGHImage, load, save diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 36013c3af2..b6f003b984 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -1,14 +1,14 @@ """Read / write FreeSurfer geometry, morphometry, label, annotation formats """ -import warnings -import numpy as np import getpass import time - +import warnings from collections import OrderedDict -from ..openers import Opener +import numpy as np + +from ..openers import Opener _ANNOT_DT = '>i4' """Data type for Freesurfer `.annot` files. 
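Every hunk in this patch applies the same mechanical transformation: imports are regrouped into standard-library, third-party, and first-party sections, alphabetized within each section, with plain `import X` lines placed before `from X import ...` lines. Where relative order is semantically significant, an `# isort: split` comment (visible in the nibabel/__init__.py hunk above) makes the sorter treat the blocks on either side independently. A minimal sketch of the layout being enforced follows; the names are taken from the diffs themselves, but the grouping commentary and the exact isort configuration are reconstructions, not something recorded in this patch:

    # A sketch of the layout these hunks enforce; the grouping is inferred
    # from the diffs, not read from the project's isort configuration.

    # Section 1: standard library, with plain imports ahead of from-imports.
    import os
    import sys
    from functools import reduce

    # Section 2: third-party packages.
    import numpy as np

    # Section 3: first-party (project) imports, sorted by module, then name.
    import nibabel as nib
    from nibabel.fileslice import canonical_slicers, fileslice
    from nibabel.volumeutils import apply_read_scaling, array_from_file

    # isort: split
    # The split marker makes isort sort everything below as a fresh block,
    # so a deliberate ordering across the boundary survives re-runs.
    from nibabel.loadsave import load, save

Running `isort --diff .` from the repository root prints hunks of this shape without modifying any files; dropping `--diff` applies them in place.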
diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 45881ba313..6358a6af81 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -11,17 +11,18 @@ Author: Krish Subramaniam """ from os.path import splitext + import numpy as np -from ..affines import voxel_sizes, from_matvec -from ..volumeutils import array_to_file, array_from_file, endian_codes, Recoder +from ..affines import from_matvec, voxel_sizes +from ..arrayproxy import ArrayProxy, reshape_dataobj +from ..batteryrunners import BatteryRunner, Report from ..filebasedimages import SerializableImage -from ..filename_parser import _stringify_path -from ..spatialimages import HeaderDataError, SpatialImage from ..fileholders import FileHolder -from ..arrayproxy import ArrayProxy, reshape_dataobj +from ..filename_parser import _stringify_path from ..openers import ImageOpener -from ..batteryrunners import BatteryRunner, Report +from ..spatialimages import HeaderDataError, SpatialImage +from ..volumeutils import Recoder, array_from_file, array_to_file, endian_codes from ..wrapstruct import LabeledWrapStruct # mgh header diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 3c47f82031..2406679d73 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -1,33 +1,32 @@ -import os -from os.path import join as pjoin, isdir import getpass -import time -import struct import hashlib +import os +import struct +import time +import unittest import warnings +from os.path import isdir +from os.path import join as pjoin -from ...tmpdirs import InTemporaryDirectory - -import unittest -import pytest import numpy as np +import pytest from numpy.testing import assert_allclose, assert_array_equal +from ...fileslice import strided_scalar +from ...testing import clear_and_catch_warnings +from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data +from ...tmpdirs import InTemporaryDirectory from .. import ( - read_geometry, - read_morph_data, read_annot, + read_geometry, read_label, + read_morph_data, + write_annot, write_geometry, write_morph_data, - write_annot, ) from ..io import _pack_rgb -from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data -from ...fileslice import strided_scalar -from ...testing import clear_and_catch_warnings - DATA_SDIR = 'fsaverage' have_freesurfer = False diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 29f1687c29..ee0ed50fec 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -8,30 +8,25 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for mghformat reading writing""" -import os import io +import os import numpy as np +import pytest +from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_array_equal -from .. import load, save -from ...openers import ImageOpener -from ..mghformat import MGHHeader, MGHError, MGHImage -from ...tmpdirs import InTemporaryDirectory +from ... import imageglobals from ...fileholders import FileHolder +from ...openers import ImageOpener from ...spatialimages import HeaderDataError -from ...volumeutils import sys_is_le -from ...wrapstruct import WrapStructError -from ... 
import imageglobals - - -import pytest - -from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_almost_equal - from ...testing import data_path - from ...tests import test_spatialimages as tsi from ...tests import test_wrapstruct as tws +from ...tmpdirs import InTemporaryDirectory +from ...volumeutils import sys_is_le +from ...wrapstruct import WrapStructError +from .. import load, save +from ..mghformat import MGHError, MGHHeader, MGHImage MGZ_FNAME = os.path.join(data_path, 'test.mgz') diff --git a/nibabel/funcs.py b/nibabel/funcs.py index 02b9e3ecd7..f83ed68709 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -9,8 +9,8 @@ """Processor functions for images""" import numpy as np -from .orientations import io_orientation, OrientationError from .loadsave import load +from .orientations import OrientationError, io_orientation def squeeze_image(img): diff --git a/nibabel/gifti/__init__.py b/nibabel/gifti/__init__.py index 2faaf5ab57..824c968afc 100644 --- a/nibabel/gifti/__init__.py +++ b/nibabel/gifti/__init__.py @@ -18,11 +18,11 @@ """ from .gifti import ( - GiftiMetaData, - GiftiNVPairs, - GiftiLabelTable, - GiftiLabel, GiftiCoordSystem, GiftiDataArray, GiftiImage, + GiftiLabel, + GiftiLabelTable, + GiftiMetaData, + GiftiNVPairs, ) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 8f5efa8ad8..7313f984f2 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -12,17 +12,18 @@ from http://www.nitrc.org/projects/gifti/ """ -import sys -import numpy as np import base64 +import sys import warnings +import numpy as np + from .. import xmlutils as xml -from ..filebasedimages import SerializableImage -from ..nifti1 import data_type_codes, xform_codes, intent_codes from ..caret import CaretMetaData -from .util import array_index_order_codes, gifti_encoding_codes, gifti_endian_codes, KIND2FMT from ..deprecated import deprecate_with_version +from ..filebasedimages import SerializableImage +from ..nifti1 import data_type_codes, intent_codes, xform_codes +from .util import KIND2FMT, array_index_order_codes, gifti_encoding_codes, gifti_endian_codes class _GiftiMDList(list): diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 5de4c2e22c..88c63b5600 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -8,26 +8,26 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import base64 +import os.path as op import sys import warnings import zlib -import os.path as op from io import StringIO from xml.parsers.expat import ExpatError import numpy as np +from ..nifti1 import data_type_codes, intent_codes, xform_codes +from ..xmlutils import XmlParser from .gifti import ( - GiftiMetaData, + GiftiCoordSystem, + GiftiDataArray, GiftiImage, GiftiLabel, GiftiLabelTable, - GiftiDataArray, - GiftiCoordSystem, + GiftiMetaData, ) from .util import array_index_order_codes, gifti_encoding_codes, gifti_endian_codes -from ..nifti1 import data_type_codes, xform_codes, intent_codes -from ..xmlutils import XmlParser class GiftiParseError(ExpatError): diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 73ae9ed95d..8858de589f 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -1,29 +1,29 @@ """Testing gifti objects """ -import warnings +import itertools import sys +import warnings from io import BytesIO import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, 
assert_array_equal from nibabel.tmpdirs import InTemporaryDirectory from ... import load +from ...fileholders import FileHolder +from ...nifti1 import data_type_codes +from ...testing import test_data from .. import ( - GiftiImage, + GiftiCoordSystem, GiftiDataArray, + GiftiImage, GiftiLabel, GiftiLabelTable, GiftiMetaData, GiftiNVPairs, - GiftiCoordSystem, ) -from ...nifti1 import data_type_codes -from ...fileholders import FileHolder - -from numpy.testing import assert_array_almost_equal, assert_array_equal -import pytest -from ...testing import test_data from .test_parse_gifti_fast import ( DATA_FILE1, DATA_FILE2, @@ -32,7 +32,6 @@ DATA_FILE5, DATA_FILE6, ) -import itertools def test_agg_data(): diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index d1f61d3c22..c7a958a5f8 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -7,26 +7,24 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from os.path import join as pjoin, dirname, basename +import shutil import sys import warnings -import shutil +from os.path import basename, dirname +from os.path import join as pjoin from unittest import mock import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal -from .. import gifti as gi -from ..util import gifti_endian_codes -from ..parse_gifti_fast import GiftiParseError, GiftiImageParser from ...loadsave import load, save from ...nifti1 import xform_codes -from ...tmpdirs import InTemporaryDirectory - -from numpy.testing import assert_array_almost_equal - -import pytest from ...testing import clear_and_catch_warnings, suppress_warnings - +from ...tmpdirs import InTemporaryDirectory +from .. import gifti as gi +from ..parse_gifti_fast import GiftiImageParser, GiftiParseError +from ..util import gifti_endian_codes IO_DATA_PATH = pjoin(dirname(__file__), 'data') diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index 614692daac..ac27a6ecac 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -15,11 +15,11 @@ from .gifti import GiftiImage from .minc1 import Minc1Image from .minc2 import Minc2Image -from .nifti1 import Nifti1Pair, Nifti1Image -from .nifti2 import Nifti2Pair, Nifti2Image +from .nifti1 import Nifti1Image, Nifti1Pair +from .nifti2 import Nifti2Image, Nifti2Pair from .parrec import PARRECImage -from .spm99analyze import Spm99AnalyzeImage from .spm2analyze import Spm2AnalyzeImage +from .spm99analyze import Spm99AnalyzeImage # Ordered by the load/save priority. 
all_image_classes = [ diff --git a/nibabel/imagestats.py b/nibabel/imagestats.py index f507365e93..6f1b68178b 100644 --- a/nibabel/imagestats.py +++ b/nibabel/imagestats.py @@ -11,6 +11,7 @@ """ import numpy as np + from nibabel.imageclasses import spatial_axes_first diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 187644a8e1..f64f3e8230 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -10,14 +10,15 @@ """Utilities to load and save image objects""" import os + import numpy as np -from .filename_parser import splitext_addext, _stringify_path -from .openers import ImageOpener -from .filebasedimages import ImageFileError -from .imageclasses import all_image_classes from .arrayproxy import is_proxy from .deprecated import deprecate_with_version +from .filebasedimages import ImageFileError +from .filename_parser import _stringify_path, splitext_addext +from .imageclasses import all_image_classes +from .openers import ImageOpener _compressed_suffixes = ('.gz', '.bz2', '.zst') diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 56b8747fb4..d6d2d3081b 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -13,9 +13,8 @@ import numpy as np from .externals.netcdf import netcdf_file - -from .spatialimages import SpatialHeader, SpatialImage from .fileslice import canonical_slicers +from .spatialimages import SpatialHeader, SpatialImage _dt_dict = { ('b', 'unsigned'): np.uint8, diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 275a7799c8..9638ced5ee 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -27,7 +27,7 @@ """ import numpy as np -from .minc1 import Minc1File, MincHeader, Minc1Image, MincError +from .minc1 import Minc1File, Minc1Image, MincError, MincHeader class Hdf5Bunch: diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index 10471e586a..d03845f900 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -3,11 +3,10 @@ """ Parse the "ASCCONV" meta data format found in a variety of Siemens MR files. """ -import re import ast +import re from collections import OrderedDict - ASCCONV_RE = re.compile( r'### ASCCONV BEGIN((?:\s*[^=\s]+=[^=\s]+)*) ###\n(.*?)\n### ASCCONV END ###', flags=re.M | re.S, diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 3f5293dcc3..a3c49d7f10 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -1,5 +1,5 @@ -from os.path import join as pjoin import glob +from os.path import join as pjoin import numpy as np diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 3c7268dbe0..be070e8608 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -18,10 +18,11 @@ import numpy as np from nibabel.optpkg import optional_package + +from ..onetime import auto_attr as one_time +from ..openers import ImageOpener from . 
import csareader as csar from .dwiparams import B2q, nearest_pos_semi_def, q2bg -from ..openers import ImageOpener -from ..onetime import auto_attr as one_time pydicom = optional_package('pydicom')[0] diff --git a/nibabel/nicom/tests/__init__.py b/nibabel/nicom/tests/__init__.py index 4a7ea3b284..ec2c5b2f38 100644 --- a/nibabel/nicom/tests/__init__.py +++ b/nibabel/nicom/tests/__init__.py @@ -1,4 +1,5 @@ import unittest + from nibabel.optpkg import optional_package pydicom, have_dicom, _ = optional_package('pydicom') diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index 6415c2725e..4737d3615d 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -1,15 +1,15 @@ """Testing Siemens "ASCCONV" parser """ -from os.path import join as pjoin, dirname from collections import OrderedDict +from os.path import dirname +from os.path import join as pjoin import numpy as np +from numpy.testing import assert_array_almost_equal, assert_array_equal from .. import ascconv -from numpy.testing import assert_array_equal, assert_array_almost_equal - DATA_PATH = pjoin(dirname(__file__), 'data') ASCCONV_INPUT = pjoin(DATA_PATH, 'ascconv_sample.txt') diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 1dfe348c4b..0fc559c7fc 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -1,18 +1,17 @@ """Testing Siemens CSA header reader """ +import gzip import sys -from os.path import join as pjoin from copy import deepcopy -import gzip +from os.path import join as pjoin import numpy as np +import pytest from .. import csareader as csa from .. import dwiparams as dwp - -import pytest -from . import pydicom, dicom_test -from .test_dicomwrappers import IO_DATA_PATH, DATA +from . import dicom_test, pydicom +from .test_dicomwrappers import DATA, IO_DATA_PATH CSA2_B0 = open(pjoin(IO_DATA_PATH, 'csa2_b0.bin'), 'rb').read() CSA2_B1000 = open(pjoin(IO_DATA_PATH, 'csa2_b1000.bin'), 'rb').read() diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index dba29b6503..b7a60dfc3b 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -4,13 +4,13 @@ from os.path import join as pjoin import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal from nibabel.optpkg import optional_package -from .. import dicomreaders as didr -from .test_dicomwrappers import EXPECTED_AFFINE, EXPECTED_PARAMS, IO_DATA_PATH, DATA -import pytest -from numpy.testing import assert_array_equal, assert_array_almost_equal +from .. import dicomreaders as didr +from .test_dicomwrappers import DATA, EXPECTED_AFFINE, EXPECTED_PARAMS, IO_DATA_PATH pydicom, _, setup_module = optional_package('pydicom') diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 3dd1665c3f..3efa7f3aab 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -1,24 +1,23 @@ """Testing DICOM wrappers """ -from os.path import join as pjoin, dirname import gzip -from hashlib import sha1 -from decimal import Decimal from copy import copy +from decimal import Decimal +from hashlib import sha1 +from os.path import dirname +from os.path import join as pjoin +from unittest import TestCase import numpy as np - -from . import pydicom, have_dicom, dicom_test -from .. import dicomwrappers as didw -from .. 
import dicomreaders as didr -from ...volumeutils import endian_codes - import pytest -from unittest import TestCase +from numpy.testing import assert_array_almost_equal, assert_array_equal -from numpy.testing import assert_array_equal, assert_array_almost_equal from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data +from ...volumeutils import endian_codes +from .. import dicomreaders as didr +from .. import dicomwrappers as didw +from . import dicom_test, have_dicom, pydicom IO_DATA_PATH = pjoin(dirname(__file__), 'data') DATA_FILE = pjoin(IO_DATA_PATH, 'siemens_dwi_1000.dcm.gz') diff --git a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py index 8a869c01db..6e98b4af61 100644 --- a/nibabel/nicom/tests/test_dwiparams.py +++ b/nibabel/nicom/tests/test_dwiparams.py @@ -2,12 +2,11 @@ """ import numpy as np - -from ..dwiparams import B2q, q2bg - import pytest +from numpy.testing import assert_array_almost_equal +from numpy.testing import assert_equal as np_assert_equal -from numpy.testing import assert_array_almost_equal, assert_equal as np_assert_equal +from ..dwiparams import B2q, q2bg def test_b2q(): diff --git a/nibabel/nicom/tests/test_structreader.py b/nibabel/nicom/tests/test_structreader.py index c7815cd6fb..2d37bbc3ed 100644 --- a/nibabel/nicom/tests/test_structreader.py +++ b/nibabel/nicom/tests/test_structreader.py @@ -1,7 +1,7 @@ """Testing Siemens CSA header reader """ -import sys import struct +import sys from ..structreader import Unpacker diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py index edd20f9973..37dbcd7d19 100644 --- a/nibabel/nicom/tests/test_utils.py +++ b/nibabel/nicom/tests/test_utils.py @@ -3,8 +3,9 @@ import re from nibabel.optpkg import optional_package -from .test_dicomwrappers import DATA, DATA_PHILIPS + from ..utils import find_private_section +from .test_dicomwrappers import DATA, DATA_PHILIPS pydicom, _, setup_module = optional_package('pydicom') diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 625fe6baa9..0d28298313 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -17,16 +17,16 @@ import numpy.linalg as npl from numpy.compat.py3k import asstr +from . import analyze # module import from .arrayproxy import get_obj_dtype -from .optpkg import optional_package +from .batteryrunners import Report +from .casting import have_binary128 from .filebasedimages import SerializableImage -from .volumeutils import Recoder, make_dt_codes, endian_codes +from .optpkg import optional_package +from .quaternions import fillpositive, mat2quat, quat2mat from .spatialimages import HeaderDataError, ImageFileError -from .batteryrunners import Report -from .quaternions import fillpositive, quat2mat, mat2quat -from . 
import analyze # module import from .spm99analyze import SpmAnalyzeHeader -from .casting import have_binary128 +from .volumeutils import Recoder, endian_codes, make_dt_codes pdcm, have_dicom, _ = optional_package('pydicom') diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 9e8e597772..193e458c6b 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -17,8 +17,8 @@ from .analyze import AnalyzeHeader from .batteryrunners import Report +from .nifti1 import Nifti1Header, Nifti1Image, Nifti1Pair from .spatialimages import HeaderDataError, ImageFileError -from .nifti1 import Nifti1Header, Nifti1Pair, Nifti1Image r""" Header struct from : https://www.nitrc.org/forum/message.php?msg_id=3738 diff --git a/nibabel/openers.py b/nibabel/openers.py index 6338711cd7..4a1b911c95 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -9,10 +9,11 @@ """Context manager openers for various fileobject types """ -from bz2 import BZ2File import gzip import warnings +from bz2 import BZ2File from os.path import splitext + from packaging.version import Version from nibabel.optpkg import optional_package diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index 090a73c366..c91ad0f1e8 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -1,5 +1,6 @@ """Routines to support optional packages""" from packaging.version import Version + from .tripwire import TripWire diff --git a/nibabel/parrec.py b/nibabel/parrec.py index c7d7a55617..04184117dc 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -122,21 +122,22 @@ to a CSV file by adding the option "--volume-info". """ +import re import warnings -import numpy as np +from collections import OrderedDict from copy import deepcopy -import re from io import StringIO from locale import getpreferredencoding -from collections import OrderedDict -from .spatialimages import SpatialHeader, SpatialImage +import numpy as np + +from .affines import apply_affine, dot_reduce, from_matvec from .eulerangles import euler2mat -from .volumeutils import Recoder, array_from_file -from .affines import from_matvec, dot_reduce, apply_affine -from .nifti1 import unit_codes from .fileslice import fileslice, strided_scalar +from .nifti1 import unit_codes from .openers import ImageOpener +from .spatialimages import SpatialHeader, SpatialImage +from .volumeutils import Recoder, array_from_file # PSL to RAS affine PSL_TO_RAS = np.array( diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 4d0257f4d6..010e4107ac 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -1,5 +1,7 @@ import sys + from packaging.version import Version + from . 
import _version __version__ = _version.get_versions()['version'] diff --git a/nibabel/processing.py b/nibabel/processing.py index 336e9b40f1..669b416fb6 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -22,11 +22,11 @@ spnd, _, _ = optional_package('scipy.ndimage') -from .affines import AffineError, to_matvec, from_matvec, append_diag, rescale_affine -from .spaces import vox2out_vox +from .affines import AffineError, append_diag, from_matvec, rescale_affine, to_matvec +from .imageclasses import spatial_axes_first from .nifti1 import Nifti1Image from .orientations import axcodes2ornt, io_orientation, ornt_transform -from .imageclasses import spatial_axes_first +from .spaces import vox2out_vox SIGMA2FWHM = np.sqrt(8 * np.log(2)) diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index a58c2fdba9..9ee2553c5a 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -33,11 +33,10 @@ except ImportError: have_dicom = False else: # pydicom module available - from pydicom.dicomio import read_file - from pydicom.sequence import Sequence - # Values not imported by default import pydicom.values + from pydicom.dicomio import read_file + from pydicom.sequence import Sequence if have_dicom: tag_for_keyword = pydicom.datadict.tag_for_keyword diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index 7ae9a3c63a..7965029f3b 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -26,6 +26,7 @@ """ import math + import numpy as np MAX_FLOAT = np.maximum_sctype(float) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 7977943ffd..1adf63fe42 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -131,13 +131,13 @@ import numpy as np -from .filebasedimages import FileBasedHeader from .dataobj_images import DataobjImage from .filebasedimages import ImageFileError # noqa -from .viewers import OrthoSlicer3D -from .volumeutils import shape_zoom_affine +from .filebasedimages import FileBasedHeader from .fileslice import canonical_slicers from .orientations import apply_orientation, inv_ornt_aff +from .viewers import OrthoSlicer3D +from .volumeutils import shape_zoom_affine class HeaderDataError(Exception): diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 1f9d7a3589..cad77c4d09 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -8,15 +8,14 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read / write access to SPM99 version of analyze image format""" import warnings -import numpy as np - from io import BytesIO -from .spatialimages import HeaderDataError, HeaderTypeError +import numpy as np -from .batteryrunners import Report from . 
import analyze # module import +from .batteryrunners import Report from .optpkg import optional_package +from .spatialimages import HeaderDataError, HeaderTypeError have_scipy = optional_package('scipy')[1] diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 5e8d87b671..604c32b1e5 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -3,13 +3,12 @@ import os import warnings -from .header import Field from .array_sequence import ArraySequence -from .tractogram import Tractogram, LazyTractogram +from .header import Field +from .tck import TckFile +from .tractogram import LazyTractogram, Tractogram from .tractogram_file import ExtensionWarning - from .trk import TrkFile -from .tck import TckFile # List of all supported formats FORMATS = {'.trk': TrkFile, '.tck': TckFile} diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index bb03e6bfd0..f9e9af90e3 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -1,6 +1,6 @@ import numbers -from operator import mul from functools import reduce +from operator import mul import numpy as np diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 7fb5cde8b3..e08afb48ea 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -12,11 +12,9 @@ from nibabel.openers import Opener from .array_sequence import ArraySequence -from .tractogram_file import TractogramFile -from .tractogram_file import HeaderWarning, DataWarning -from .tractogram_file import HeaderError, DataError -from .tractogram import TractogramItem, Tractogram, LazyTractogram from .header import Field +from .tractogram import LazyTractogram, Tractogram, TractogramItem +from .tractogram_file import DataError, DataWarning, HeaderError, HeaderWarning, TractogramFile from .utils import peek_next MEGABYTE = 1024 * 1024 diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index a673c5ce9d..a3faa6a58b 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -1,16 +1,15 @@ +import itertools import os import sys -import unittest import tempfile -import itertools -import numpy as np +import unittest +import numpy as np import pytest -from ...testing import assert_arrays_equal from numpy.testing import assert_array_equal -from ..array_sequence import ArraySequence, is_array_sequence, concatenate - +from ...testing import assert_arrays_equal +from ..array_sequence import ArraySequence, concatenate, is_array_sequence SEQ_DATA = {} diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index 09824b6ee9..dfb74042a3 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -1,25 +1,22 @@ import os -import unittest import tempfile -import numpy as np +import unittest import warnings +from io import BytesIO +from os.path import join as pjoin +import numpy as np import pytest - -from os.path import join as pjoin +from numpy.compat.py3k import asbytes import nibabel as nib -from io import BytesIO +from nibabel.testing import clear_and_catch_warnings, data_path, error_warnings from nibabel.tmpdirs import InTemporaryDirectory -from numpy.compat.py3k import asbytes - -from nibabel.testing import data_path, error_warnings, clear_and_catch_warnings +from .. 
import FORMATS, trk +from ..tractogram import LazyTractogram, Tractogram +from ..tractogram_file import ExtensionWarning, TractogramFile from .test_tractogram import assert_tractogram_equal -from ..tractogram import Tractogram, LazyTractogram -from ..tractogram_file import TractogramFile, ExtensionWarning -from .. import FORMATS -from .. import trk DATA = {} diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index 7f6e7307ba..f514d3f3df 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -1,21 +1,18 @@ import os import unittest -import numpy as np -from os.path import join as pjoin - from io import BytesIO +from os.path import join as pjoin -from ..array_sequence import ArraySequence -from ..tractogram import Tractogram -from ..tractogram_file import HeaderWarning, HeaderError -from ..tractogram_file import DataError - -from .. import tck as tck_module -from ..tck import TckFile - +import numpy as np import pytest from numpy.testing import assert_array_equal + from ...testing import data_path, error_warnings +from .. import tck as tck_module +from ..array_sequence import ArraySequence +from ..tck import TckFile +from ..tractogram import Tractogram +from ..tractogram_file import DataError, HeaderError, HeaderWarning from .test_tractogram import assert_tractogram_equal DATA = {} diff --git a/nibabel/streamlines/tests/test_tractogram.py b/nibabel/streamlines/tests/test_tractogram.py index c698f10e44..30294be438 100644 --- a/nibabel/streamlines/tests/test_tractogram.py +++ b/nibabel/streamlines/tests/test_tractogram.py @@ -1,19 +1,26 @@ -import sys import copy +import operator +import sys import unittest -import numpy as np import warnings -import operator from collections import defaultdict +import numpy as np import pytest -from ...testing import assert_arrays_equal, clear_and_catch_warnings -from numpy.testing import assert_array_equal, assert_array_almost_equal +from numpy.testing import assert_array_almost_equal, assert_array_equal +from ...testing import assert_arrays_equal, clear_and_catch_warnings from .. 
import tractogram as module_tractogram -from ..tractogram import is_data_dict, is_lazy_dict -from ..tractogram import TractogramItem, Tractogram, LazyTractogram -from ..tractogram import PerArrayDict, PerArraySequenceDict, LazyDict +from ..tractogram import ( + LazyDict, + LazyTractogram, + PerArrayDict, + PerArraySequenceDict, + Tractogram, + TractogramItem, + is_data_dict, + is_lazy_dict, +) DATA = {} diff --git a/nibabel/streamlines/tests/test_tractogram_file.py b/nibabel/streamlines/tests/test_tractogram_file.py index a1d89ccec6..53a7fb662b 100644 --- a/nibabel/streamlines/tests/test_tractogram_file.py +++ b/nibabel/streamlines/tests/test_tractogram_file.py @@ -1,11 +1,11 @@ """Test tractogramFile base class """ +import pytest + from ..tractogram import Tractogram from ..tractogram_file import TractogramFile -import pytest - def test_subclassing_tractogram_file(): diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py index e23efc8d5d..b8ff43620b 100644 --- a/nibabel/streamlines/tests/test_trk.py +++ b/nibabel/streamlines/tests/test_trk.py @@ -1,28 +1,26 @@ +import copy import os import sys -import copy import unittest -import numpy as np -from os.path import join as pjoin - from io import BytesIO +from os.path import join as pjoin +import numpy as np import pytest -from ...testing import data_path, clear_and_catch_warnings, assert_arr_dict_equal, error_warnings from numpy.testing import assert_array_equal -from .test_tractogram import assert_tractogram_equal +from ...testing import assert_arr_dict_equal, clear_and_catch_warnings, data_path, error_warnings +from .. import trk as trk_module +from ..header import Field from ..tractogram import Tractogram from ..tractogram_file import HeaderError, HeaderWarning - -from .. 
import trk as trk_module from ..trk import ( TrkFile, - encode_value_in_name, decode_value_from_name, + encode_value_in_name, get_affine_trackvis_to_rasmm, ) -from ..header import Field +from .test_tractogram import assert_tractogram_equal DATA = {} diff --git a/nibabel/streamlines/tests/test_utils.py b/nibabel/streamlines/tests/test_utils.py index bcdde6d013..7836d45eb5 100644 --- a/nibabel/streamlines/tests/test_utils.py +++ b/nibabel/streamlines/tests/test_utils.py @@ -1,11 +1,11 @@ import os -import numpy as np -import nibabel as nib -from nibabel.testing import data_path +import numpy as np +import pytest from numpy.testing import assert_array_equal -import pytest +import nibabel as nib +from nibabel.testing import data_path from ..utils import get_affine_from_reference diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index cf9a099fe4..ded937ab11 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -1,8 +1,9 @@ import copy import numbers -import numpy as np -from warnings import warn from collections.abc import MutableMapping +from warnings import warn + +import numpy as np from nibabel.affines import apply_affine diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index eb382af4d0..bbf156ee08 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -2,27 +2,24 @@ # http://www.trackvis.org/docs/?subsect=fileformat import os -import struct import string +import struct import warnings import numpy as np from numpy.compat.py3k import asstr import nibabel as nib - from nibabel.openers import Opener -from nibabel.volumeutils import native_code, swapped_code, endian_codes from nibabel.orientations import aff2axcodes, axcodes2ornt +from nibabel.volumeutils import endian_codes, native_code, swapped_code from .array_sequence import create_arraysequences_from_generator -from .tractogram_file import TractogramFile -from .tractogram_file import DataError, HeaderError, HeaderWarning -from .tractogram import TractogramItem, Tractogram, LazyTractogram from .header import Field +from .tractogram import LazyTractogram, Tractogram, TractogramItem +from .tractogram_file import DataError, HeaderError, HeaderWarning, TractogramFile from .utils import peek_next - MAX_NB_NAMED_SCALARS_PER_POINT = 10 MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE = 10 diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 44cc82890b..4600782d4b 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -8,24 +8,21 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for testing""" -import re import os +import re import sys -import warnings -from pkg_resources import resource_filename - import unittest +import warnings +from contextlib import nullcontext +from itertools import zip_longest -import pytest import numpy as np +import pytest from numpy.testing import assert_array_equal +from pkg_resources import resource_filename +from .helpers import assert_data_similar, bytesio_filemap, bytesio_round_trip from .np_features import memmap_after_ufunc -from .helpers import bytesio_filemap, bytesio_round_trip, assert_data_similar - -from itertools import zip_longest - -from contextlib import nullcontext def test_data(subdir=None, fname=None): @@ -229,6 +226,7 @@ def setUp(self): def expires(version): """Decorator to mark a test as xfail with ExpiredDeprecationError after version""" from packaging.version import Version + from nibabel import __version__ 
as nbver from nibabel.deprecator import ExpiredDeprecationError diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index 17b36bd6dd..8ade7f539c 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -22,6 +22,7 @@ of the field of view. """ import glob + import numpy as np import numpy.linalg as npl diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index 2d736fb445..598726fe74 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -6,8 +6,8 @@ * standard.trk """ import numpy as np -import nibabel as nib +import nibabel as nib from nibabel.streamlines import FORMATS from nibabel.streamlines.header import Field diff --git a/nibabel/tests/data/make_moved_anat.py b/nibabel/tests/data/make_moved_anat.py index aee20eda97..678b5dfdeb 100644 --- a/nibabel/tests/data/make_moved_anat.py +++ b/nibabel/tests/data/make_moved_anat.py @@ -9,8 +9,8 @@ import numpy as np import nibabel as nib -from nibabel.eulerangles import euler2mat from nibabel.affines import from_matvec +from nibabel.eulerangles import euler2mat if __name__ == '__main__': img = nib.load('anatomical.nii') diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 06e5540674..8d4652d79f 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -1,10 +1,11 @@ """Functions / decorators for finding / requiring nibabel-data directory """ -from os import environ, listdir -from os.path import dirname, realpath, join as pjoin, isdir, exists - import unittest +from os import environ, listdir +from os.path import dirname, exists, isdir +from os.path import join as pjoin +from os.path import realpath def get_nibabel_data(): diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 474eeceb2c..1ec2fcb486 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -12,12 +12,12 @@ assert_equal(code, 0) assert_equal(stdout, b'This script ran OK') """ -import sys import os -from os.path import dirname, join as pjoin, isfile, isdir, realpath, pathsep - -from subprocess import Popen, PIPE - +import sys +from os.path import dirname, isdir, isfile +from os.path import join as pjoin +from os.path import pathsep, realpath +from subprocess import PIPE, Popen MY_PACKAGE = __package__ diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 08166df6e8..08ae5f4bda 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -4,26 +4,24 @@ from itertools import product import numpy as np +import pytest +from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_array_equal -from ..eulerangles import euler2mat from ..affines import ( AffineError, - apply_affine, append_diag, - to_matvec, - from_matvec, + apply_affine, dot_reduce, - voxel_sizes, + from_matvec, obliquity, rescale_affine, + to_matvec, + voxel_sizes, ) +from ..eulerangles import euler2mat from ..orientations import aff2axcodes -import pytest -from numpy.testing import assert_array_equal, assert_almost_equal, assert_array_almost_equal - - def validated_apply_affine(T, xyz): # This was the original apply_affine implementation that we've stashed here # to test against diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 2cea69413f..1f80addc30 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -12,38 +12,35 @@ header """ 
-import os -import re +import itertools import logging +import os import pickle -import itertools +import re +from io import BytesIO, StringIO import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal -from io import BytesIO, StringIO -from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types -from ..analyze import AnalyzeHeader, AnalyzeImage -from ..nifti1 import Nifti1Header -from ..loadsave import read_img_data from .. import imageglobals -from ..casting import as_int -from ..tmpdirs import InTemporaryDirectory +from ..analyze import AnalyzeHeader, AnalyzeImage from ..arraywriters import WriterError +from ..casting import as_int +from ..loadsave import read_img_data +from ..nifti1 import Nifti1Header from ..optpkg import optional_package - -import pytest -from numpy.testing import assert_array_equal, assert_array_almost_equal - +from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types from ..testing import ( - data_path, - suppress_warnings, assert_dt_equal, bytesio_filemap, bytesio_round_trip, + data_path, + suppress_warnings, ) - -from . import test_wrapstruct as tws +from ..tmpdirs import InTemporaryDirectory from . import test_spatialimages as tsi +from . import test_wrapstruct as tws HAVE_ZSTD = optional_package('pyzstd')[1] diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 2382847da4..1d21092eef 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -1,6 +1,7 @@ """Metaclass and class for validating instance APIs """ import os + import pytest diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index e4d16e7dd8..5018e95e1f 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -9,29 +9,25 @@ """Tests for arrayproxy module """ -import warnings -import gzip import contextlib - +import gzip import pickle +import warnings from io import BytesIO -from packaging.version import Version -from ..tmpdirs import InTemporaryDirectory +from unittest import mock import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from packaging.version import Version from .. import __version__ -from ..arrayproxy import ArrayProxy, is_proxy, reshape_dataobj, get_obj_dtype -from ..openers import ImageOpener -from ..nifti1 import Nifti1Header +from ..arrayproxy import ArrayProxy, get_obj_dtype, is_proxy, reshape_dataobj from ..deprecator import ExpiredDeprecationError - -from unittest import mock - -from numpy.testing import assert_array_equal, assert_array_almost_equal -import pytest +from ..nifti1 import Nifti1Header +from ..openers import ImageOpener from ..testing import memmap_after_ufunc - +from ..tmpdirs import InTemporaryDirectory from .test_fileslice import slicer_samples from .test_openers import patch_indexed_gzip diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 1fbaa38916..e77c2fd11f 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -3,27 +3,26 @@ See docstring of :mod:`nibabel.arraywriters` for API. 
""" -from platform import python_compiler, machine import itertools +from io import BytesIO +from platform import machine, python_compiler + import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal -from io import BytesIO from ..arraywriters import ( - SlopeInterArrayWriter, + ArrayWriter, + ScalingError, SlopeArrayWriter, + SlopeInterArrayWriter, WriterError, - ScalingError, - ArrayWriter, - make_array_writer, get_slope_inter, + make_array_writer, ) -from ..casting import int_abs, type_info, shared_range, on_powerpc -from ..volumeutils import array_from_file, apply_read_scaling, _dt_min_max - -from numpy.testing import assert_array_almost_equal, assert_array_equal -import pytest +from ..casting import int_abs, on_powerpc, shared_range, type_info from ..testing import assert_allclose_safely, suppress_warnings - +from ..volumeutils import _dt_min_max, apply_read_scaling, array_from_file FLOAT_TYPES = np.sctypes['float'] COMPLEX_TYPES = np.sctypes['complex'] diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index d260d2db76..84590452ea 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -9,13 +9,13 @@ """Tests for BatteryRunner and Report objects """ +import logging from io import StringIO -import logging +import pytest from ..batteryrunners import BatteryRunner, Report -import pytest # define some trivial functions as checks def chk1(obj, fix=False): diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index ff9e91520e..b2c1f1257c 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -10,14 +10,11 @@ from os.path import join as pjoin import numpy as np - -from .. import load, Nifti1Image -from .. import brikhead - import pytest from numpy.testing import assert_array_equal -from ..testing import data_path, assert_data_similar +from .. import Nifti1Image, brikhead, load +from ..testing import assert_data_similar, data_path from .test_fileslice import slicer_samples EXAMPLE_IMAGES = [ diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index d16541b352..8c4cad7bbb 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -1,29 +1,27 @@ """Test casting utilities """ import os - from platform import machine + import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal from ..casting import ( - float_to_int, - shared_range, CastingError, - int_to_float, - as_int, - int_abs, - floor_log2, able_int_type, + as_int, best_float, - ulp, + float_to_int, + floor_log2, + int_abs, + int_to_float, longdouble_precision_improved, + shared_range, + ulp, ) from ..testing import suppress_warnings -from numpy.testing import assert_array_almost_equal, assert_array_equal - -import pytest - def test_shared_range(): for ft in np.sctypes['float']: diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index 0fbadc6af0..ece2e1c6cd 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -2,31 +2,27 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for data module""" import os -from os.path import join as pjoin -from os import environ as env import sys import tempfile +from os import environ as env +from os.path import join as pjoin + +import pytest +from .. 
import data as nibd from ..data import ( - get_data_path, - find_data_dir, + Bomber, DataError, - _cfg_value, - make_datasource, Datasource, VersionedDatasource, - Bomber, + _cfg_value, datasource_or_bomber, + find_data_dir, + get_data_path, + make_datasource, ) - from ..tmpdirs import TemporaryDirectory - -from .. import data as nibd - - -import pytest - -from .test_environment import with_environment, DATA_KEY, USER_KEY +from .test_environment import DATA_KEY, USER_KEY, with_environment @pytest.fixture diff --git a/nibabel/tests/test_dataobj_images.py b/nibabel/tests/test_dataobj_images.py index dfbb0fe4cb..a1d2dbc9f1 100644 --- a/nibabel/tests/test_dataobj_images.py +++ b/nibabel/tests/test_dataobj_images.py @@ -3,11 +3,10 @@ import numpy as np -from nibabel.filebasedimages import FileBasedHeader from nibabel.dataobj_images import DataobjImage - -from nibabel.tests.test_image_api import DataInterfaceMixin +from nibabel.filebasedimages import FileBasedHeader from nibabel.tests.test_filebasedimages import TestFBImageAPI as _TFI +from nibabel.tests.test_image_api import DataInterfaceMixin class DoNumpyImage(DataobjImage): diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index cd56f507f9..962f9c0827 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -2,12 +2,11 @@ """ import warnings + import pytest from nibabel import pkg_info -from nibabel.deprecated import ModuleProxy, FutureWarningMixin, deprecate_with_version - - +from nibabel.deprecated import FutureWarningMixin, ModuleProxy, deprecate_with_version from nibabel.tests.test_deprecator import TestDeprecatorFunc as _TestDF diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 31b61f5153..833908af94 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -9,12 +9,12 @@ import pytest from nibabel.deprecator import ( - _ensure_cr, - _add_dep_doc, - ExpiredDeprecationError, - Deprecator, - TESTSETUP, TESTCLEANUP, + TESTSETUP, + Deprecator, + ExpiredDeprecationError, + _add_dep_doc, + _ensure_cr, ) from ..testing import clear_and_catch_warnings diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index b43b2762f7..f756600fd3 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -2,18 +2,22 @@ """ import os -from os.path import join as pjoin, dirname +import sqlite3 from io import BytesIO +from os.path import dirname +from os.path import join as pjoin + from ..testing import suppress_warnings -import sqlite3 with suppress_warnings(): from .. import dft -from .. import nifti1 import unittest + import pytest +from .. 
import nifti1 + # Shield optional package imports from ..optpkg import optional_package diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py index b1f05177bb..fee71d628b 100644 --- a/nibabel/tests/test_diff.py +++ b/nibabel/tests/test_diff.py @@ -3,9 +3,10 @@ """Test diff """ -from os.path import dirname, join as pjoin, abspath -import numpy as np +from os.path import abspath, dirname +from os.path import join as pjoin +import numpy as np DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) @@ -41,7 +42,7 @@ def test_diff_values_mixed(): def test_diff_values_array(): - from numpy import nan, array, inf + from numpy import array, inf, nan a_int = array([1, 2]) a_float = a_int.astype(float) diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 875e06c0a7..9cb9f91e1a 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -9,27 +9,23 @@ import os import warnings +from unittest import TestCase import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal -from ..openers import Opener from ..ecat import ( EcatHeader, - EcatSubHeader, EcatImage, - read_mlist, + EcatSubHeader, get_frame_order, get_series_framenumbers, + read_mlist, ) - -from unittest import TestCase -import pytest - -from numpy.testing import assert_array_equal, assert_array_almost_equal - +from ..openers import Opener from ..testing import data_path, suppress_warnings from ..tmpdirs import InTemporaryDirectory - from . import test_wrapstruct as tws from .test_fileslice import slicer_samples diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index de4164cd3c..b7dbe4750a 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -13,11 +13,10 @@ from os.path import join as pjoin import numpy as np +from numpy.testing import assert_almost_equal, assert_array_equal -from .nibabel_data import get_nibabel_data, needs_nibabel_data from ..ecat import load - -from numpy.testing import assert_array_equal, assert_almost_equal +from .nibabel_data import get_nibabel_data, needs_nibabel_data ECAT_TEST_PATH = pjoin(get_nibabel_data(), 'nipy-ecattest') diff --git a/nibabel/tests/test_environment.py b/nibabel/tests/test_environment.py index 5742edef43..afb6d36f84 100644 --- a/nibabel/tests/test_environment.py +++ b/nibabel/tests/test_environment.py @@ -3,13 +3,13 @@ import os from os import environ as env -from os.path import join as pjoin, abspath +from os.path import abspath +from os.path import join as pjoin +import pytest from .. import environment as nibe -import pytest - DATA_KEY = 'NIPY_DATA_PATH' USER_KEY = 'NIPY_USER_DIR' diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index 25e4c776d2..8b0fb932d5 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -9,15 +9,15 @@ """Tests for Euler angles""" import math + import numpy as np +import pytest from numpy import pi +from numpy.testing import assert_array_almost_equal, assert_array_equal from .. import eulerangles as nea from .. 
import quaternions as nq -import pytest -from numpy.testing import assert_array_equal, assert_array_almost_equal - FLOAT_EPS = np.finfo(np.float64).eps # Example rotations """ diff --git a/nibabel/tests/test_filebasedimages.py b/nibabel/tests/test_filebasedimages.py index aee02f5a68..aa48a3e747 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -1,14 +1,13 @@ """Testing filebasedimages module """ -from itertools import product import warnings +from itertools import product import numpy as np import pytest from ..filebasedimages import FileBasedHeader, FileBasedImage, SerializableImage - from .test_image_api import GenericImageAPI, SerializeMixin diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index 73698b23ac..506a623758 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -2,10 +2,10 @@ Check that loading an image does not use up filehandles. """ -from os.path import join as pjoin import shutil -from tempfile import mkdtemp import unittest +from os.path import join as pjoin +from tempfile import mkdtemp import numpy as np diff --git a/nibabel/tests/test_fileholders.py b/nibabel/tests/test_fileholders.py index a0e50e4133..33b3f76e6f 100644 --- a/nibabel/tests/test_fileholders.py +++ b/nibabel/tests/test_fileholders.py @@ -3,7 +3,6 @@ from io import BytesIO - from ..fileholders import FileHolder diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index b4a816a137..29da7b6f61 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -8,10 +8,10 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for filename container""" -from ..filename_parser import types_filenames, TypesFilenamesError, parse_filename, splitext_addext - import pytest +from ..filename_parser import TypesFilenamesError, parse_filename, splitext_addext, types_filenames + def test_filenames(): types_exts = (('image', '.img'), ('header', '.hdr')) diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 80c4a0ab92..52557d353d 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -9,16 +9,16 @@ """Testing filesets - a draft """ +from io import BytesIO + import numpy as np +import pytest +from numpy.testing import assert_array_equal -from .. import Nifti1Image, Nifti1Pair, MGHImage, all_image_classes -from io import BytesIO +from .. 
import MGHImage, Nifti1Image, Nifti1Pair, all_image_classes from ..fileholders import FileHolderError from ..spatialimages import SpatialImage -from numpy.testing import assert_array_equal -import pytest - def test_files_spatialimages(): # test files creation in image classes diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index e98fd473a0..781f17d716 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -1,36 +1,35 @@ """Test slicing of file-like objects""" +import time +from functools import partial from io import BytesIO from itertools import product -from functools import partial -from threading import Thread, Lock -import time +from threading import Lock, Thread import numpy as np +import pytest +from numpy.testing import assert_array_equal from ..fileslice import ( - is_fancy, + _positive_slice, + _simple_fileslice, + calc_slicedefs, canonical_slicers, fileslice, + fill_slicer, + is_fancy, + optimize_read_slicers, + optimize_slicer, predict_shape, read_segments, - _positive_slice, - threshold_heuristic, - optimize_slicer, slice2len, - fill_slicer, - optimize_read_slicers, - slicers2segments, - calc_slicedefs, - _simple_fileslice, slice2outax, + slicers2segments, strided_scalar, + threshold_heuristic, ) -import pytest -from numpy.testing import assert_array_equal - def _check_slice(sliceobj): # Fancy indexing always returns a copy, basic indexing returns a view diff --git a/nibabel/tests/test_fileutils.py b/nibabel/tests/test_fileutils.py index 3544b88977..21c7676fce 100644 --- a/nibabel/tests/test_fileutils.py +++ b/nibabel/tests/test_fileutils.py @@ -10,10 +10,9 @@ """ -from ..fileutils import read_zt_byte_strings - import pytest +from ..fileutils import read_zt_byte_strings from ..tmpdirs import InTemporaryDirectory diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 62df671aca..321eb1b961 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -2,28 +2,26 @@ """ import sys - import numpy as np +import pytest from ..casting import ( - floor_exact, - ceil_exact, - as_int, FloatingError, - int_to_float, - floor_log2, - type_info, - _check_nmant, _check_maxexp, - ok_floats, - on_powerpc, + _check_nmant, + as_int, + ceil_exact, + floor_exact, + floor_log2, have_binary128, + int_to_float, longdouble_precision_improved, + ok_floats, + on_powerpc, + type_info, ) from ..testing import suppress_warnings -import pytest - IEEE_floats = [np.float16, np.float32, np.float64] LD_INFO = type_info(np.longdouble) diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index e1a7ec9264..752aed0b52 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -9,17 +9,15 @@ """Test for image funcs""" import numpy as np +import pytest +from numpy.testing import assert_array_equal -from ..funcs import concat_images, as_closest_canonical, OrientationError from ..analyze import AnalyzeImage -from ..nifti1 import Nifti1Image +from ..funcs import OrientationError, as_closest_canonical, concat_images from ..loadsave import save - +from ..nifti1 import Nifti1Image from ..tmpdirs import InTemporaryDirectory -from numpy.testing import assert_array_equal -import pytest - _counter = 0 diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 57a0322cab..af82c304ac 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -23,11 +23,11 @@ cached, but False otherwise. 
""" +import io +import pathlib import warnings from functools import partial from itertools import product -import io -import pathlib import numpy as np @@ -36,45 +36,47 @@ _, have_scipy, _ = optional_package('scipy') _, have_h5py, _ = optional_package('h5py') +import unittest + +import pytest +from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equal, assert_warns + +from nibabel.arraywriters import WriterError +from nibabel.testing import ( + assert_data_similar, + bytesio_filemap, + bytesio_round_trip, + clear_and_catch_warnings, + expires, + nullcontext, +) + from .. import ( AnalyzeImage, - Spm99AnalyzeImage, - Spm2AnalyzeImage, - Nifti1Pair, - Nifti1Image, - Nifti2Pair, - Nifti2Image, GiftiImage, MGHImage, Minc1Image, Minc2Image, + Nifti1Image, + Nifti1Pair, + Nifti2Image, + Nifti2Pair, + Spm2AnalyzeImage, + Spm99AnalyzeImage, + brikhead, is_proxy, + minc1, + minc2, + parrec, ) -from ..spatialimages import SpatialImage -from .. import minc1, minc2, parrec, brikhead from ..deprecator import ExpiredDeprecationError - -import unittest -import pytest - -from numpy.testing import assert_almost_equal, assert_array_equal, assert_warns, assert_allclose -from nibabel.testing import ( - bytesio_round_trip, - bytesio_filemap, - assert_data_similar, - clear_and_catch_warnings, - nullcontext, - expires, -) +from ..spatialimages import SpatialImage from ..tmpdirs import InTemporaryDirectory - from .test_api_validators import ValidateAPI +from .test_brikhead import EXAMPLE_IMAGES as AFNI_EXAMPLE_IMAGES from .test_minc1 import EXAMPLE_IMAGES as MINC1_EXAMPLE_IMAGES from .test_minc2 import EXAMPLE_IMAGES as MINC2_EXAMPLE_IMAGES from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLE_IMAGES -from .test_brikhead import EXAMPLE_IMAGES as AFNI_EXAMPLE_IMAGES - -from nibabel.arraywriters import WriterError def maybe_deprecated(meth_name): diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 13c403285c..962a2433bf 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -7,43 +7,42 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for loader function""" -from io import BytesIO - +import logging +import pathlib import shutil -from os.path import dirname, join as pjoin +from io import BytesIO +from os.path import dirname +from os.path import join as pjoin from tempfile import mkdtemp -import pathlib -import logging import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal -from .. import analyze as ana -from .. import spm99analyze as spm99 -from .. import spm2analyze as spm2 -from .. import nifti1 as ni1 -from .. import loadsave as nils from .. import ( - Nifti1Image, + AnalyzeImage, + MGHImage, + Minc1Image, + Minc2Image, Nifti1Header, + Nifti1Image, Nifti1Pair, Nifti2Image, Nifti2Pair, - Minc1Image, - Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, - AnalyzeImage, - MGHImage, all_image_classes, ) -from ..tmpdirs import InTemporaryDirectory -from ..volumeutils import native_code, swapped_code +from .. import analyze as ana +from .. import loadsave as nils +from .. import nifti1 as ni1 +from .. import spm2analyze as spm2 +from .. 
import spm99analyze as spm99 from ..optpkg import optional_package from ..spatialimages import SpatialImage from ..testing import expires - -from numpy.testing import assert_array_equal, assert_array_almost_equal -import pytest +from ..tmpdirs import InTemporaryDirectory +from ..volumeutils import native_code, swapped_code _, have_scipy, _ = optional_package('scipy') # No scipy=>no SPM-format writing DATA_PATH = pjoin(dirname(__file__), 'data') diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index fd9927eb00..f8186f4147 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -9,28 +9,28 @@ """Tests for is_image / may_contain_header functions""" import copy -from os.path import dirname, basename, join as pjoin +from os.path import basename, dirname +from os.path import join as pjoin import numpy as np from .. import ( - Nifti1Image, + AnalyzeHeader, + AnalyzeImage, + MGHImage, + Minc1Image, + Minc2Image, Nifti1Header, + Nifti1Image, Nifti1Pair, - Nifti2Image, Nifti2Header, + Nifti2Image, Nifti2Pair, - AnalyzeImage, - AnalyzeHeader, - Minc1Image, - Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, - MGHImage, all_image_classes, ) - DATA_PATH = pjoin(dirname(__file__), 'data') diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 472e1c5d63..74f05dc6e3 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -1,21 +1,19 @@ """Testing imageclasses module """ -from os.path import dirname, join as pjoin import warnings +from os.path import dirname +from os.path import join as pjoin import numpy as np - import pytest import nibabel as nib +from nibabel import imageclasses from nibabel.analyze import AnalyzeImage +from nibabel.imageclasses import spatial_axes_first from nibabel.nifti1 import Nifti1Image from nibabel.nifti2 import Nifti2Image - -from nibabel import imageclasses -from nibabel.imageclasses import spatial_axes_first - from nibabel.optpkg import optional_package have_h5py = optional_package('h5py')[1] diff --git a/nibabel/tests/test_imagestats.py b/nibabel/tests/test_imagestats.py index 47dd2ecbd5..8adfc910a8 100644 --- a/nibabel/tests/test_imagestats.py +++ b/nibabel/tests/test_imagestats.py @@ -10,8 +10,7 @@ import numpy as np -from .. import imagestats -from .. import Nifti1Image +from .. import Nifti1Image, imagestats def test_mask_volume(): diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index c227889e59..ff4dc082f6 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -1,8 +1,10 @@ -import nibabel as nib -from pkg_resources import resource_filename -import pytest from unittest import mock +import pytest +from pkg_resources import resource_filename + +import nibabel as nib + @pytest.mark.parametrize( 'verbose, v_args', [(-2, ['-qq']), (-1, ['-q']), (0, []), (1, ['-v']), (2, ['-vv'])] diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index f8cc168cfd..3b58772b6a 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -1,34 +1,33 @@ """Testing loadsave module """ -from os.path import dirname, join as pjoin -import shutil import pathlib +import shutil +from os.path import dirname +from os.path import join as pjoin import numpy as np from .. 
import ( - Spm99AnalyzeImage, - Spm2AnalyzeImage, - Nifti1Pair, Nifti1Image, - Nifti2Pair, + Nifti1Pair, Nifti2Image, + Nifti2Pair, + Spm2AnalyzeImage, + Spm99AnalyzeImage, ) -from ..loadsave import load, read_img_data, _signature_matches_extension from ..filebasedimages import ImageFileError -from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory +from ..loadsave import _signature_matches_extension, load, read_img_data from ..openers import Opener -from ..testing import expires - from ..optpkg import optional_package +from ..testing import expires +from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory _, have_scipy, _ = optional_package('scipy') _, have_pyzstd, _ = optional_package('pyzstd') -from numpy.testing import assert_almost_equal, assert_array_equal - import pytest +from numpy.testing import assert_almost_equal, assert_array_equal data_path = pjoin(dirname(__file__), 'data') diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 4556f76787..3eeefaa84b 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -7,29 +7,25 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from os.path import join as pjoin - -import gzip import bz2 -import warnings +import gzip import types +import warnings from io import BytesIO +from os.path import join as pjoin import numpy as np +import pytest +from numpy.testing import assert_array_equal -from .. import load, Nifti1Image -from ..externals.netcdf import netcdf_file +from .. import Nifti1Image, load, minc1 from ..deprecated import ModuleProxy -from .. import minc1 +from ..deprecator import ExpiredDeprecationError +from ..externals.netcdf import netcdf_file from ..minc1 import Minc1File, Minc1Image, MincHeader from ..optpkg import optional_package - +from ..testing import assert_data_similar, clear_and_catch_warnings, data_path from ..tmpdirs import InTemporaryDirectory -from ..deprecator import ExpiredDeprecationError -from ..testing import assert_data_similar, data_path, clear_and_catch_warnings -from numpy.testing import assert_array_equal -import pytest - from . import test_spatialimages as tsi from .test_fileslice import slicer_samples diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index 3e220ef2d1..bd06456c33 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -13,10 +13,8 @@ from .. import minc2 from ..minc2 import Minc2File, Minc2Image - from ..optpkg import optional_package from ..testing import data_path - from . import test_minc1 as tm2 h5py, have_h5py, setup_module = optional_package('h5py') diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index 03fb93cbea..e96e716699 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -13,12 +13,12 @@ from os.path import join as pjoin import numpy as np +from numpy.testing import assert_almost_equal, assert_array_equal -from .nibabel_data import get_nibabel_data, needs_nibabel_data -from .. import load as top_load, Nifti1Image +from .. import Nifti1Image +from .. 
import load as top_load from ..optpkg import optional_package - -from numpy.testing import assert_array_equal, assert_almost_equal +from .nibabel_data import get_nibabel_data, needs_nibabel_data h5py, have_h5py, setup_module = optional_package('h5py') diff --git a/nibabel/tests/test_mriutils.py b/nibabel/tests/test_mriutils.py index 082d053805..848579cee6 100644 --- a/nibabel/tests/test_mriutils.py +++ b/nibabel/tests/test_mriutils.py @@ -10,10 +10,10 @@ """ -from numpy.testing import assert_almost_equal import pytest +from numpy.testing import assert_almost_equal -from ..mriutils import calculate_dwell_time, MRIError +from ..mriutils import MRIError, calculate_dwell_time def test_calculate_dwell_time(): diff --git a/nibabel/tests/test_nibabel_data.py b/nibabel/tests/test_nibabel_data.py index ec97108e35..1687589549 100644 --- a/nibabel/tests/test_nibabel_data.py +++ b/nibabel/tests/test_nibabel_data.py @@ -2,11 +2,12 @@ """ import os -from os.path import dirname, realpath, join as pjoin, isdir +from os.path import dirname, isdir +from os.path import join as pjoin +from os.path import realpath from . import nibabel_data as nibd - MY_DIR = dirname(__file__) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 0018dfe842..59bf214eda 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -8,55 +8,51 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for nifti reading package""" import os -import warnings import struct +import unittest +import warnings +from io import BytesIO import numpy as np +import pytest +from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_array_equal from nibabel import nifti1 as nifti1 from nibabel.affines import from_matvec -from nibabel.casting import type_info, have_binary128 +from nibabel.casting import have_binary128, type_info from nibabel.eulerangles import euler2mat -from io import BytesIO from nibabel.nifti1 import ( - load, + Nifti1DicomExtension, + Nifti1Extension, + Nifti1Extensions, Nifti1Header, - Nifti1PairHeader, Nifti1Image, Nifti1Pair, - Nifti1Extension, - Nifti1DicomExtension, - Nifti1Extensions, + Nifti1PairHeader, data_type_codes, extension_codes, + load, slice_order_codes, ) +from nibabel.optpkg import optional_package from nibabel.spatialimages import HeaderDataError from nibabel.tmpdirs import InTemporaryDirectory -from nibabel.optpkg import optional_package + from ..freesurfer import load as mghload from ..orientations import aff2axcodes - -from .test_arraywriters import rt_err_estimate, IUINT_TYPES -from .test_orientations import ALL_ORNTS -from .nibabel_data import get_nibabel_data, needs_nibabel_data - -from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_almost_equal - from ..testing import ( + bytesio_filemap, + bytesio_round_trip, clear_and_catch_warnings, data_path, runif_extra_has, suppress_warnings, - bytesio_filemap, - bytesio_round_trip, ) - -import unittest -import pytest - from . import test_analyze as tana from . 
import test_spm99analyze as tspm +from .nibabel_data import get_nibabel_data, needs_nibabel_data +from .test_arraywriters import IUINT_TYPES, rt_err_estimate +from .test_orientations import ALL_ORNTS header_file = os.path.join(data_path, 'nifti1.hdr') image_file = os.path.join(data_path, 'example4d.nii.gz') diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index 57a97a1322..742ef148bf 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -10,16 +10,13 @@ import os import numpy as np - -from .. import nifti2 -from ..nifti1 import Nifti1Header, Nifti1PairHeader, Nifti1Extension, Nifti1Extensions -from ..nifti2 import Nifti2Header, Nifti2PairHeader, Nifti2Image, Nifti2Pair - -from . import test_nifti1 as tn1 - from numpy.testing import assert_array_equal +from .. import nifti2 +from ..nifti1 import Nifti1Extension, Nifti1Extensions, Nifti1Header, Nifti1PairHeader +from ..nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair, Nifti2PairHeader from ..testing import data_path +from . import test_nifti1 as tn1 header_file = os.path.join(data_path, 'nifti2.hdr') image_file = os.path.join(data_path, 'example_nifti2.nii.gz') diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index 2659b7fbbc..426702fa43 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -1,4 +1,5 @@ import pytest + from nibabel.onetime import auto_attr, setattr_on_read from nibabel.testing import expires diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 2a306079f4..5219cb27ac 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -7,29 +7,23 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test for openers module""" -import os import contextlib -from gzip import GzipFile -from io import BytesIO, UnsupportedOperation -from packaging.version import Version import hashlib +import os import time - -from numpy.compat.py3k import asstr, asbytes -from ..openers import ( - Opener, - ImageOpener, - HAVE_INDEXED_GZIP, - BZ2File, - DeterministicGzipFile, -) -from ..tmpdirs import InTemporaryDirectory -from ..optpkg import optional_package - import unittest +from gzip import GzipFile +from io import BytesIO, UnsupportedOperation from unittest import mock + import pytest +from numpy.compat.py3k import asbytes, asstr +from packaging.version import Version + from ..deprecator import ExpiredDeprecationError +from ..openers import HAVE_INDEXED_GZIP, BZ2File, DeterministicGzipFile, ImageOpener, Opener +from ..optpkg import optional_package +from ..tmpdirs import InTemporaryDirectory pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') diff --git a/nibabel/tests/test_optpkg.py b/nibabel/tests/test_optpkg.py index 875c32bbdf..7ffaa2f851 100644 --- a/nibabel/tests/test_optpkg.py +++ b/nibabel/tests/test_optpkg.py @@ -1,14 +1,13 @@ """Testing optpkg module """ -from unittest import mock -import types -import sys import builtins -from packaging.version import Version +import sys +import types +from unittest import SkipTest, mock -from unittest import SkipTest import pytest +from packaging.version import Version from nibabel.optpkg import optional_package from nibabel.tripwire import TripWire, TripWireError diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 5d786c0eac..16f7f5ce46 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -8,29 +8,26 @@ ### ### ### ### ### ### ### ### ### ### 
### ### ### ### ### ### ### ### ### ## """Testing for orientations module""" -import numpy as np import warnings +import numpy as np import pytest - from numpy.testing import assert_array_equal +from ..affines import from_matvec, to_matvec from ..orientations import ( - io_orientation, - ornt_transform, - inv_ornt_aff, - flip_axis, - apply_orientation, OrientationError, - ornt2axcodes, - axcodes2ornt, aff2axcodes, + apply_orientation, + axcodes2ornt, + flip_axis, + inv_ornt_aff, + io_orientation, + ornt2axcodes, + ornt_transform, ) - -from ..affines import from_matvec, to_matvec from ..testing import expires - IN_ARRS = [ np.eye(4), [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]], diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 0eca2fdca4..e50b609da4 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -1,38 +1,35 @@ """Testing parrec module """ -from os.path import join as pjoin, dirname, basename from glob import glob +from os.path import basename, dirname +from os.path import join as pjoin from warnings import simplefilter import numpy as np +import pytest from numpy import array as npa +from numpy.testing import assert_almost_equal, assert_array_equal from .. import load as top_load -from ..nifti1 import Nifti1Image, Nifti1Extension, Nifti1Header from .. import parrec +from ..fileholders import FileHolder +from ..nifti1 import Nifti1Extension, Nifti1Header, Nifti1Image +from ..openers import ImageOpener from ..parrec import ( - parse_PAR_header, - PARRECHeader, + PARRECArrayProxy, PARRECError, - vol_numbers, - vol_is_full, + PARRECHeader, PARRECImage, - PARRECArrayProxy, exts2pars, + parse_PAR_header, + vol_is_full, + vol_numbers, ) -from ..openers import ImageOpener -from ..fileholders import FileHolder +from ..testing import assert_arr_dict_equal, clear_and_catch_warnings, suppress_warnings from ..volumeutils import array_from_file - -from numpy.testing import assert_almost_equal, assert_array_equal - -import pytest -from ..testing import clear_and_catch_warnings, suppress_warnings, assert_arr_dict_equal - -from .test_arrayproxy import check_mmap from . import test_spatialimages as tsi - +from .test_arrayproxy import check_mmap DATA_PATH = pjoin(dirname(__file__), 'data') EG_PAR = pjoin(DATA_PATH, 'phantom_EPI_asc_CLEAR_2_1.PAR') diff --git a/nibabel/tests/test_parrec_data.py b/nibabel/tests/test_parrec_data.py index 1179a21264..a437fafeda 100644 --- a/nibabel/tests/test_parrec_data.py +++ b/nibabel/tests/test_parrec_data.py @@ -1,21 +1,21 @@ """Test we can correctly import example PARREC files """ +import unittest from glob import glob -from os.path import join as pjoin, basename, splitext, exists +from os.path import basename, exists +from os.path import join as pjoin +from os.path import splitext import numpy as np +import pytest +from numpy.testing import assert_almost_equal from .. 
import load as top_load -from ..parrec import load from ..affines import voxel_sizes - +from ..parrec import load from .nibabel_data import get_nibabel_data, needs_nibabel_data -import unittest -import pytest -from numpy.testing import assert_almost_equal - BALLS = pjoin(get_nibabel_data(), 'nitest-balls1') OBLIQUE = pjoin(get_nibabel_data(), 'parrec_oblique') diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index 0583add021..32059c68d8 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -1,13 +1,13 @@ """Testing package info """ +import pytest from packaging.version import Version import nibabel as nib from nibabel.pkg_info import cmp_pkg_version -from ..info import VERSION -import pytest +from ..info import VERSION def test_pkg_info(): diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index cd7c1830ea..dc877d3802 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -9,8 +9,9 @@ """Testing processing module """ -from os.path import dirname, join as pjoin import logging +from os.path import dirname +from os.path import join as pjoin import numpy as np import numpy.linalg as npl @@ -19,28 +20,28 @@ spnd, have_scipy, _ = optional_package('scipy.ndimage') +import unittest + +import pytest +from numpy.testing import assert_almost_equal, assert_array_equal + import nibabel as nib +from nibabel.affines import AffineError, apply_affine, from_matvec, to_matvec, voxel_sizes +from nibabel.eulerangles import euler2mat +from nibabel.nifti1 import Nifti1Image +from nibabel.nifti2 import Nifti2Image +from nibabel.orientations import aff2axcodes, inv_ornt_aff from nibabel.processing import ( - sigma2fwhm, - fwhm2sigma, adapt_affine, + conform, + fwhm2sigma, resample_from_to, resample_to_output, + sigma2fwhm, smooth_image, - conform, ) -from nibabel.nifti1 import Nifti1Image -from nibabel.nifti2 import Nifti2Image -from nibabel.orientations import aff2axcodes, inv_ornt_aff -from nibabel.affines import AffineError, from_matvec, to_matvec, apply_affine, voxel_sizes -from nibabel.eulerangles import euler2mat - -from numpy.testing import assert_almost_equal, assert_array_equal -import unittest -import pytest - -from nibabel.tests.test_spaces import assert_all_in, get_outspace_params from nibabel.testing import assert_allclose_safely +from nibabel.tests.test_spaces import assert_all_in, get_outspace_params needs_scipy = unittest.skipUnless(have_scipy, 'These tests need scipy') diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index c2ca1ed27c..dfac167690 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -28,38 +28,31 @@ These last are to allow the proxy to be re-used with different images. """ -from os.path import join as pjoin +import unittest import warnings -from itertools import product from io import BytesIO +from itertools import product +from os.path import join as pjoin import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equal -from ..volumeutils import apply_read_scaling +from .. import ecat, minc1, minc2, parrec from ..analyze import AnalyzeHeader -from ..spm99analyze import Spm99AnalyzeHeader -from ..spm2analyze import Spm2AnalyzeHeader -from ..nifti1 import Nifti1Header -from ..freesurfer.mghformat import MGHHeader -from .. import minc1 -from ..externals.netcdf import netcdf_file -from .. import minc2 -from .. import ecat -from .. 
import parrec -from ..casting import have_binary128 - from ..arrayproxy import ArrayProxy, is_proxy - -import unittest -import pytest -from numpy.testing import assert_almost_equal, assert_array_equal, assert_allclose - -from ..testing import data_path as DATA_PATH, assert_dt_equal, clear_and_catch_warnings +from ..casting import have_binary128 from ..deprecator import ExpiredDeprecationError +from ..externals.netcdf import netcdf_file +from ..freesurfer.mghformat import MGHHeader +from ..nifti1 import Nifti1Header from ..optpkg import optional_package - +from ..spm2analyze import Spm2AnalyzeHeader +from ..spm99analyze import Spm99AnalyzeHeader +from ..testing import assert_dt_equal, clear_and_catch_warnings +from ..testing import data_path as DATA_PATH from ..tmpdirs import InTemporaryDirectory - +from ..volumeutils import apply_read_scaling from .test_api_validators import ValidateAPI from .test_parrec import EG_REC, VARY_REC diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index 3dc681f517..a3e63dd851 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -9,14 +9,12 @@ """Test quaternion calculations""" import numpy as np -from numpy import pi - import pytest - +from numpy import pi from numpy.testing import assert_array_almost_equal, assert_array_equal -from .. import quaternions as nq from .. import eulerangles as nea +from .. import quaternions as nq # Example rotations eg_rots = [] diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index 1d903d6f9f..49a9898ce2 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -9,11 +9,10 @@ """Tests recoder class""" import numpy as np - -from ..volumeutils import Recoder, DtypeMapper, native_code, swapped_code - import pytest +from ..volumeutils import DtypeMapper, Recoder, native_code, swapped_code + def test_recoder_1(): # simplest case, no aliases diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 9300dfa207..939895abbd 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -1,8 +1,10 @@ -from ..pkg_info import cmp_pkg_version import unittest from unittest import mock + import pytest +from ..pkg_info import cmp_pkg_version + MODULE_SCHEDULE = [ ('5.0.0', ['nibabel.keywordonly', 'nibabel.py3k']), ('4.0.0', ['nibabel.trackvis']), diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index 54ab79a928..cb754d0b54 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -3,16 +3,16 @@ Test arrays with a range of numerical values, integer and floating point. """ -import numpy as np - from io import BytesIO -from .. import Nifti1Image, Nifti1Header -from ..spatialimages import HeaderDataError, supported_np_types -from ..arraywriters import ScalingError -from ..casting import best_float, ulp, type_info +import numpy as np from numpy.testing import assert_array_equal +from .. 
import Nifti1Header, Nifti1Image +from ..arraywriters import ScalingError +from ..casting import best_float, type_info, ulp +from ..spatialimages import HeaderDataError, supported_np_types + DEBUG = False diff --git a/nibabel/tests/test_rstutils.py b/nibabel/tests/test_rstutils.py index 55a0aace7c..847b7a4eee 100644 --- a/nibabel/tests/test_rstutils.py +++ b/nibabel/tests/test_rstutils.py @@ -2,11 +2,10 @@ """ import numpy as np +import pytest from ..rstutils import rst_table -import pytest - def test_rst_table(): # Tests for printable table function diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index e705a96c83..2fbe88a1a7 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -8,19 +8,17 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test for scaling / rounding in volumeutils module""" -import numpy as np import warnings - from io import BytesIO -from ..volumeutils import finite_range, apply_read_scaling, array_to_file, array_from_file -from ..casting import type_info -from ..testing import suppress_warnings - -from .test_volumeutils import _calculate_scale -from numpy.testing import assert_array_almost_equal, assert_array_equal +import numpy as np import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from ..casting import type_info +from ..testing import suppress_warnings +from ..volumeutils import apply_read_scaling, array_from_file, array_to_file, finite_range +from .test_volumeutils import _calculate_scale # Debug print statements DEBUG = True diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index e4006788c1..a089fb7eef 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -5,30 +5,31 @@ Test running scripts """ -import sys +import csv import os import shutil -from os.path import dirname, join as pjoin, abspath, splitext, basename, exists -import csv +import sys +import unittest from glob import glob +from os.path import abspath, basename, dirname, exists +from os.path import join as pjoin +from os.path import splitext import numpy as np +import pytest +from numpy.testing import assert_almost_equal import nibabel as nib -from ..tmpdirs import InTemporaryDirectory + from ..loadsave import load from ..orientations import aff2axcodes, inv_ornt_aff - -import unittest -import pytest -from numpy.testing import assert_almost_equal - -from .scriptrunner import ScriptRunner +from ..testing import assert_data_similar, assert_dt_equal, assert_re_in +from ..tmpdirs import InTemporaryDirectory from .nibabel_data import needs_nibabel_data -from ..testing import assert_dt_equal, assert_re_in -from .test_parrec import DTI_PAR_BVECS, DTI_PAR_BVALS, EXAMPLE_IMAGES as PARREC_EXAMPLES -from .test_parrec_data import BALLS, AFF_OFF -from ..testing import assert_data_similar +from .scriptrunner import ScriptRunner +from .test_parrec import DTI_PAR_BVALS, DTI_PAR_BVECS +from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLES +from .test_parrec_data import AFF_OFF, BALLS def _proc_stdout(stdout): diff --git a/nibabel/tests/test_spaces.py b/nibabel/tests/test_spaces.py index 3e3f2ab0a8..83dec9256c 100644 --- a/nibabel/tests/test_spaces.py +++ b/nibabel/tests/test_spaces.py @@ -3,14 +3,13 @@ import numpy as np import numpy.linalg as npl +import pytest +from numpy.testing import assert_almost_equal -from ..spaces import vox2out_vox, slice2volume from ..affines import apply_affine, from_matvec -from ..nifti1 import Nifti1Image 
from ..eulerangles import euler2mat - -import pytest -from numpy.testing import assert_almost_equal +from ..nifti1 import Nifti1Image +from ..spaces import slice2volume, vox2out_vox def assert_all_in(in_shape, in_affine, out_shape, out_affine): diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index cdbe8dc9f2..2a1da21bdd 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -10,27 +10,24 @@ """ import warnings - -import numpy as np - from io import BytesIO -from ..spatialimages import SpatialHeader, SpatialImage, HeaderDataError -from ..imageclasses import spatial_axes_first +from unittest import TestCase +import numpy as np import pytest -from unittest import TestCase from numpy.testing import assert_array_almost_equal +from .. import load as top_load +from ..imageclasses import spatial_axes_first +from ..spatialimages import HeaderDataError, SpatialHeader, SpatialImage from ..testing import ( bytesio_round_trip, clear_and_catch_warnings, - suppress_warnings, - memmap_after_ufunc, expires, + memmap_after_ufunc, + suppress_warnings, ) - from ..tmpdirs import InTemporaryDirectory -from .. import load as top_load def test_header_init(): diff --git a/nibabel/tests/test_spm2analyze.py b/nibabel/tests/test_spm2analyze.py index 9881a23d07..7e3d048de5 100644 --- a/nibabel/tests/test_spm2analyze.py +++ b/nibabel/tests/test_spm2analyze.py @@ -9,14 +9,11 @@ """Tests for SPM2 header stuff""" import numpy as np - -from ..spatialimages import HeaderTypeError, HeaderDataError -from ..spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage - import pytest from numpy.testing import assert_array_equal - +from ..spatialimages import HeaderDataError, HeaderTypeError +from ..spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage from . import test_spm99analyze diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index 9d04643d2a..e5eb969388 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -7,14 +7,13 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -import numpy as np import itertools - +import unittest from io import BytesIO -from numpy.testing import assert_array_equal, assert_array_almost_equal -import unittest +import numpy as np import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal from ..optpkg import optional_package @@ -24,18 +23,16 @@ # files needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available') -from ..spm99analyze import Spm99AnalyzeHeader, Spm99AnalyzeImage, HeaderTypeError -from ..casting import type_info, shared_range -from ..volumeutils import apply_read_scaling, _dt_min_max -from ..spatialimages import supported_np_types, HeaderDataError - +from ..casting import shared_range, type_info +from ..spatialimages import HeaderDataError, supported_np_types +from ..spm99analyze import HeaderTypeError, Spm99AnalyzeHeader, Spm99AnalyzeImage from ..testing import ( - bytesio_round_trip, - bytesio_filemap, assert_allclose_safely, + bytesio_filemap, + bytesio_round_trip, suppress_warnings, ) - +from ..volumeutils import _dt_min_max, apply_read_scaling from . 
import test_analyze FLOAT_TYPES = np.sctypes['float'] diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 11a46bafdb..38c815d4c8 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -1,23 +1,23 @@ """Tests for warnings context managers """ -import sys import os +import sys import warnings import numpy as np +import pytest from ..testing import ( - error_warnings, - suppress_warnings, - clear_and_catch_warnings, assert_allclose_safely, - get_fresh_mod, assert_re_in, - test_data, + clear_and_catch_warnings, data_path, + error_warnings, + get_fresh_mod, + suppress_warnings, + test_data, ) -import pytest def test_assert_allclose_safely(): diff --git a/nibabel/tests/test_tmpdirs.py b/nibabel/tests/test_tmpdirs.py index 2c0c5199ce..3b2e5d5466 100644 --- a/nibabel/tests/test_tmpdirs.py +++ b/nibabel/tests/test_tmpdirs.py @@ -1,11 +1,10 @@ """Test tmpdirs module""" from os import getcwd -from os.path import realpath, abspath, dirname, isfile +from os.path import abspath, dirname, isfile, realpath from ..tmpdirs import InGivenDirectory - MY_PATH = abspath(__file__) MY_DIR = dirname(MY_PATH) diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index 0efddbe8bb..f172d5c579 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -1,10 +1,10 @@ """Testing tripwire module """ -from ..tripwire import TripWire, is_tripwire, TripWireError - import pytest +from ..tripwire import TripWire, TripWireError, is_tripwire + def test_is_tripwire(): assert not is_tripwire(object()) diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py index 04e616fedd..1649ba62da 100644 --- a/nibabel/tests/test_viewers.py +++ b/nibabel/tests/test_viewers.py @@ -7,18 +7,16 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +import unittest from collections import namedtuple as nt import numpy as np +import pytest +from numpy.testing import assert_array_equal, assert_equal from ..optpkg import optional_package from ..viewers import OrthoSlicer3D -from numpy.testing import assert_array_equal, assert_equal - -import unittest -import pytest - # Need at least MPL 1.3 for viewer tests. 
# 2020.02.11 - 1.3 wheels are no longer distributed, so the minimum we test with is 1.5 matplotlib, has_mpl, _ = optional_package('matplotlib', min_version='1.5') diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index c2104b5b59..d8821d308b 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -8,57 +8,53 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test for volumeutils module""" -import os -from os.path import exists - -from io import BytesIO -import tempfile -import warnings +import bz2 import functools -import itertools import gzip -import bz2 +import itertools +import os +import tempfile import threading import time -from packaging.version import Version +import warnings +from io import BytesIO +from os.path import exists import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from packaging.version import Version +from nibabel.testing import ( + assert_allclose_safely, + assert_dt_equal, + error_warnings, + suppress_warnings, +) + +from ..casting import OK_FLOATS, floor_log2, shared_range, type_info +from ..openers import BZ2File, ImageOpener, Opener +from ..optpkg import optional_package from ..tmpdirs import InTemporaryDirectory -from ..openers import ImageOpener from ..volumeutils import ( - array_from_file, + _dt_min_max, + _ftype4scaled_finite, _is_compressed_fobj, - array_to_file, - fname_ext_ul_case, - write_zeros, - seek_tell, + _write_data, apply_read_scaling, - working_type, + array_from_file, + array_to_file, best_write_scale_ftype, better_float_of, + fname_ext_ul_case, int_scinter_ftype, make_dt_codes, native_code, - shape_zoom_affine, rec2dict, - _dt_min_max, - _write_data, - _ftype4scaled_finite, -) -from ..openers import Opener, BZ2File -from ..casting import floor_log2, type_info, OK_FLOATS, shared_range - -from ..optpkg import optional_package - -from numpy.testing import assert_array_almost_equal, assert_array_equal -import pytest - -from nibabel.testing import ( - assert_dt_equal, - assert_allclose_safely, - suppress_warnings, - error_warnings, + seek_tell, + shape_zoom_affine, + working_type, + write_zeros, ) pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') @@ -1274,7 +1270,7 @@ def _calculate_scale(data, out_dtype, allow_intercept): out_dtype = np.dtype(out_dtype) if np.can_cast(in_dtype, out_dtype): return 1.0, 0.0, None, None - from ..arraywriters import make_array_writer, WriterError, get_slope_inter + from ..arraywriters import WriterError, get_slope_inter, make_array_writer try: writer = make_array_writer(data, out_dtype, True, allow_intercept) diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 2e4ea6a788..718700768e 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -24,21 +24,18 @@ _field_recoders -> field_recoders """ import logging +from io import BytesIO, StringIO + import numpy as np +import pytest +from numpy.testing import assert_array_equal -from io import BytesIO, StringIO -from ..wrapstruct import WrapStructError, WrapStruct, LabeledWrapStruct +from .. import imageglobals from ..batteryrunners import Report - -from ..volumeutils import swapped_code, native_code, Recoder from ..spatialimages import HeaderDataError -from .. 
import imageglobals - from ..testing import BaseTestCase - -from numpy.testing import assert_array_equal -import pytest - +from ..volumeutils import Recoder, native_code, swapped_code +from ..wrapstruct import LabeledWrapStruct, WrapStruct, WrapStructError INTEGER_TYPES = np.sctypes['int'] + np.sctypes['uint'] diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index c175940ff7..e8fba870c1 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -10,7 +10,7 @@ """ import os import shutil -from tempfile import template, mkdtemp +from tempfile import mkdtemp, template class TemporaryDirectory: diff --git a/nibabel/viewers.py b/nibabel/viewers.py index c3720d474b..d1c13dfeee 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -4,9 +4,10 @@ Paul Ivanov. """ -import numpy as np import weakref +import numpy as np + from .affines import voxel_sizes from .optpkg import optional_package from .orientations import aff2axcodes, axcodes2ornt diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index f026750e95..d31d91ea01 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -8,19 +8,19 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utility functions for analyze-like formats""" +import gzip import sys import warnings -import gzip from collections import OrderedDict -from os.path import exists, splitext -from operator import mul from functools import reduce +from operator import mul +from os.path import exists, splitext import numpy as np -from .casting import shared_range, OK_FLOATS -from .openers import BZ2File, IndexedGzipFile +from .casting import OK_FLOATS, shared_range from .externals.oset import OrderedSet +from .openers import BZ2File, IndexedGzipFile from .optpkg import optional_package pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index cdc2957dab..bf29e0828a 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -111,9 +111,9 @@ """ import numpy as np -from .volumeutils import pretty_mapping, endian_codes, native_code, swapped_code from . import imageglobals as imageglobals from .batteryrunners import BatteryRunner +from .volumeutils import endian_codes, native_code, pretty_mapping, swapped_code class WrapStructError(Exception): From 263fca9bf6d4ca314a5a322b4824d6f53d0589df Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Thu, 29 Dec 2022 21:46:13 -0500 Subject: [PATCH 133/702] STY: Manual, blue-compatible touchups [git-blame-ignore-rev] --- nibabel/analyze.py | 6 +- nibabel/arraywriters.py | 2 +- nibabel/brikhead.py | 22 +- nibabel/casting.py | 16 +- nibabel/cifti2/cifti2.py | 23 +- nibabel/cifti2/cifti2_axes.py | 4 +- nibabel/cifti2/parse_cifti2.py | 2 +- nibabel/cifti2/tests/test_axes.py | 47 +- nibabel/cifti2/tests/test_cifti2.py | 17 +- nibabel/cifti2/tests/test_cifti2io_header.py | 16 +- nibabel/cifti2/tests/test_new_cifti2.py | 55 +- nibabel/cmdline/diff.py | 4 +- nibabel/cmdline/ls.py | 2 +- nibabel/cmdline/nifti_dx.py | 2 +- nibabel/cmdline/parrec2nii.py | 28 +- nibabel/cmdline/tests/test_utils.py | 503 ++++++++----------- nibabel/cmdline/utils.py | 2 +- nibabel/data.py | 6 +- nibabel/dataobj_images.py | 6 +- nibabel/ecat.py | 6 +- nibabel/filebasedimages.py | 2 +- nibabel/filename_parser.py | 2 +- nibabel/fileslice.py | 2 +- nibabel/freesurfer/mghformat.py | 55 +- nibabel/freesurfer/tests/test_mghformat.py | 23 +- nibabel/gifti/gifti.py | 11 +- nibabel/gifti/parse_gifti_fast.py | 4 +- nibabel/gifti/util.py | 6 +- nibabel/loadsave.py | 4 +- nibabel/minc1.py | 6 +- nibabel/minc2.py | 2 +- nibabel/nicom/csareader.py | 6 +- nibabel/nicom/dicomreaders.py | 4 +- nibabel/nicom/dicomwrappers.py | 24 +- nibabel/nicom/tests/test_dicomreaders.py | 2 +- nibabel/nifti1.py | 22 +- nibabel/nifti2.py | 2 +- nibabel/optpkg.py | 2 +- nibabel/orientations.py | 10 +- nibabel/parrec.py | 156 +++--- nibabel/processing.py | 28 +- nibabel/rstutils.py | 20 +- nibabel/spatialimages.py | 23 +- nibabel/spm99analyze.py | 4 +- nibabel/streamlines/__init__.py | 5 +- nibabel/streamlines/array_sequence.py | 2 +- nibabel/streamlines/tck.py | 6 +- nibabel/streamlines/trk.py | 23 +- nibabel/tests/test_affines.py | 43 +- nibabel/tests/test_analyze.py | 2 +- nibabel/tests/test_brikhead.py | 14 +- nibabel/tests/test_euler.py | 24 +- nibabel/tests/test_fileslice.py | 31 +- nibabel/tests/test_floating.py | 25 +- nibabel/tests/test_image_types.py | 2 +- nibabel/tests/test_minc1.py | 36 +- nibabel/tests/test_minc2.py | 36 +- nibabel/tests/test_openers.py | 4 +- nibabel/tests/test_orientations.py | 159 +++++- nibabel/tests/test_parrec.py | 44 +- nibabel/tests/test_processing.py | 35 +- nibabel/tests/test_proxy_api.py | 14 +- nibabel/tests/test_spaces.py | 16 +- nibabel/tests/test_spatialimages.py | 16 +- nibabel/tests/test_volumeutils.py | 32 +- nibabel/viewers.py | 2 +- 66 files changed, 961 insertions(+), 799 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 4a76350d59..e128239865 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -394,8 +394,8 @@ def from_header(klass, header=None, check=True): obj.set_data_dtype(orig_code) except HeaderDataError: raise HeaderDataError( - f'Input header {header.__class__} has ' - f"datatype {header.get_value_label('datatype')} " + f'Input header {header.__class__} has datatype ' + f'{header.get_value_label("datatype")} ' f'but output header {klass} does not support it' ) obj.set_data_dtype(header.get_data_dtype()) @@ -785,7 +785,7 @@ def set_slope_inter(self, slope, inter=None): """ if (slope in (None, 1) or np.isnan(slope)) and (inter in (None, 0) or np.isnan(inter)): return - raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 ' 'for Analyze headers') + raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 for Analyze headers') @classmethod def _get_checks(klass): diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 
59e55b314c..21fd6ba6ee 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -432,7 +432,7 @@ def _range_scale(self, in_min, in_max): if self._out_dtype.kind == 'u': if in_min < 0 and in_max > 0: raise WriterError( - 'Cannot scale negative and positive ' 'numbers to uint without intercept' + 'Cannot scale negative and positive numbers to uint without intercept' ) if in_max <= 0: # All input numbers <= 0 self.slope = in_min / out_max diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 0559671217..72b09c4d75 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -58,7 +58,12 @@ } space_codes = Recoder( - ((0, 'unknown', ''), (1, 'scanner', 'ORIG'), (3, 'talairach', 'TLRC'), (4, 'mni', 'MNI')), + ( + (0, 'unknown', ''), + (1, 'scanner', 'ORIG'), + (3, 'talairach', 'TLRC'), + (4, 'mni', 'MNI'), + ), fields=('code', 'label', 'space'), ) @@ -104,9 +109,7 @@ def _unpack_var(var): TEMPLATE_SPACE ORIG """ - err_msg = ( - 'Please check HEAD file to ensure it is AFNI compliant. ' f'Offending attribute:\n{var}' - ) + err_msg = f'Please check HEAD file to ensure it is AFNI compliant. Offending attribute:\n{var}' atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) if len(atype) != 1: raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}') @@ -119,8 +122,7 @@ def _unpack_var(var): attr = [atype(f) for f in attr.split()] except ValueError: raise AFNIHeaderError( - 'Failed to read variable from HEAD file ' - f'due to improper type casting. {err_msg}' + f'Failed to read variable from HEAD file due to improper type casting. {err_msg}' ) else: # AFNI string attributes will always start with open single quote and @@ -354,13 +356,7 @@ def _calc_zooms(self): origin", and second giving "Time step (TR)". """ xyz_step = tuple(np.abs(self.info['DELTA'])) - t_step = self.info.get( - 'TAXIS_FLOATS', - ( - 0, - 0, - ), - ) + t_step = self.info.get('TAXIS_FLOATS', (0, 0)) if len(t_step) > 0: t_step = (t_step[1],) return xyz_step + t_step diff --git a/nibabel/casting.py b/nibabel/casting.py index ce58915fe9..a17a25a2c8 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -259,15 +259,15 @@ def type_info(np_type): if vals in ( (112, 15, 16), # binary128 (info_64.nmant, info_64.nexp, 8), # float64 - (63, 15, 12), - (63, 15, 16), - ): # Intel extended 80 + (63, 15, 12), # Intel extended 80 + (63, 15, 16), # Intel extended 80 + ): return ret # these are OK without modification # The remaining types are longdoubles with bad finfo values. Some we # correct, others we wait to hear of errors. # We start with float64 as basis ret = type_info(np.float64) - if vals in ((52, 15, 12), (52, 15, 16)): # windows float96 # windows float128? + if vals in ((52, 15, 12), (52, 15, 16)): # windows float96 / windows float128? # On windows 32 bit at least, float96 is Intel 80 storage but operating # at float64 precision. 
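
# A short illustration (not from the patch) of why the touchups above fold
# adjacent string literals into a single literal: Python concatenates them
# implicitly, so a missing space corrupts a message and a missing comma in a
# list of strings silently merges two entries.
msg = 'Cannot scale negative and positive' 'numbers to uint without intercept'
assert msg == 'Cannot scale negative and positivenumbers to uint without intercept'

labels = ['scanner', 'talairach' 'mni']  # missing comma: two entries, not three
assert labels == ['scanner', 'talairachmni']
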
The finfo values give nexp == 15 (as for intel # 80) but in calculations nexp in fact appears to be 11 as for float64 @@ -298,7 +298,13 @@ def type_info(np_type): if np_type is np.longcomplex: max_val += 0j ret = dict( - min=-max_val, max=max_val, nmant=112, nexp=15, minexp=-16382, maxexp=16384, width=width + min=-max_val, + max=max_val, + nmant=112, + nexp=15, + minexp=-16382, + maxexp=16384, + width=width, ) else: # don't recognize the type raise FloatingError(f'We had not expected long double type {np_type} with info {info}') diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 497b796dca..6c141b44f1 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -70,10 +70,15 @@ class Cifti2HeaderError(Exception): CIFTI_MODEL_TYPES = ( 'CIFTI_MODEL_TYPE_SURFACE', # Modeled using surface vertices - 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. + 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. ) -CIFTI_SERIESUNIT_TYPES = ('SECOND', 'HERTZ', 'METER', 'RADIAN') +CIFTI_SERIESUNIT_TYPES = ( + 'SECOND', + 'HERTZ', + 'METER', + 'RADIAN', +) CIFTI_BRAIN_STRUCTURES = ( 'CIFTI_STRUCTURE_ACCUMBENS_LEFT', @@ -662,7 +667,7 @@ def __init__(self, name=None, voxel_indices_ijk=None, vertices=None): self.vertices = vertices if vertices is not None else [] for val in self.vertices: if not isinstance(val, Cifti2Vertices): - raise ValueError('Cifti2Parcel vertices must be instances of ' 'Cifti2Vertices') + raise ValueError('Cifti2Parcel vertices must be instances of Cifti2Vertices') @property def voxel_indices_ijk(self): @@ -1237,7 +1242,7 @@ def _validate_new_mim(self, value): a2md = self._get_indices_from_mim(value) if not set(self.mapped_indices).isdisjoint(a2md): raise Cifti2HeaderError( - 'Indices in this Cifti2MatrixIndicesMap ' 'already mapped in this matrix' + 'Indices in this Cifti2MatrixIndicesMap already mapped in this matrix' ) def __setitem__(self, key, value): @@ -1412,7 +1417,13 @@ class Cifti2Image(DataobjImage, SerializableImage): rw = True def __init__( - self, dataobj=None, header=None, nifti_header=None, extra=None, file_map=None, dtype=None + self, + dataobj=None, + header=None, + nifti_header=None, + extra=None, + file_map=None, + dtype=None, ): """Initialize image @@ -1485,7 +1496,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): cifti_header = item.get_content() break else: - raise ValueError('NIfTI2 header does not contain a CIFTI-2 ' 'extension') + raise ValueError('NIfTI2 header does not contain a CIFTI-2 extension') # Construct cifti image. 
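
# The parse_cifti2 hunk below swaps tuple([...]) for a plain generator
# expression: tuple() consumes any iterable, so the intermediate list buys
# nothing. A sketch with a hypothetical VolumeDimensions value:
volume_dimensions = '91,109,91'
dims = tuple(int(val) for val in volume_dimensions.split(','))
assert dims == (91, 109, 91)
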
# Use array proxy object where possible diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 3d88fca1e3..3142c8362b 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -665,7 +665,7 @@ def __add__(self, other): not np.allclose(other.affine, affine) or other.volume_shape != shape ): raise ValueError( - 'Trying to concatenate two BrainModels defined ' 'in a different brain volume' + 'Trying to concatenate two BrainModels defined in a different brain volume' ) nvertices = dict(self.nvertices) @@ -1008,7 +1008,7 @@ def __add__(self, other): not np.allclose(other.affine, affine) or other.volume_shape != shape ): raise ValueError( - 'Trying to concatenate two ParcelsAxis defined ' 'in a different brain volume' + 'Trying to concatenate two ParcelsAxis defined in a different brain volume' ) nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index 550d8e30bd..e067144997 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -335,7 +335,7 @@ def StartElementHandler(self, name, attrs): raise Cifti2HeaderError( 'Volume element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) - dimensions = tuple([int(val) for val in attrs['VolumeDimensions'].split(',')]) + dimensions = tuple(int(val) for val in attrs['VolumeDimensions'].split(',')) volume = Cifti2Volume(volume_dimensions=dimensions) mim.append(volume) self.fsm_state.append('Volume') diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index b8940433af..4cabd188b1 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -128,7 +128,9 @@ def test_brain_models(): assert (bml[4].vertex == [2, 9, 14]).all() for bm, label, is_surface in zip( - bml, ['ThalamusRight', 'Other', 'cortex_left', 'Other'], (False, False, True, True) + bml, + ['ThalamusRight', 'Other', 'cortex_left', 'Other'], + (False, False, True, True), ): assert np.all(bm.surface_mask == ~bm.volume_mask) structures = list(bm.iter_structures()) @@ -176,18 +178,27 @@ def test_brain_models(): # Test the constructor bm_vox = axes.BrainModelAxis( - 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), ) assert np.all(bm_vox.name == ['CIFTI_STRUCTURE_THALAMUS_LEFT'] * 5) assert np.array_equal(bm_vox.vertex, np.full(5, -1)) assert np.array_equal(bm_vox.voxel, np.full((5, 3), 1)) with pytest.raises(ValueError): # no volume shape - axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4)) + axes.BrainModelAxis( + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + ) with pytest.raises(ValueError): # no affine axes.BrainModelAxis( - 'thalamus_left', voxel=np.ones((5, 3), dtype=int), volume_shape=(2, 3, 4) + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + volume_shape=(2, 3, 4), ) with pytest.raises(ValueError): # incorrect name @@ -207,7 +218,11 @@ def test_brain_models(): ) with pytest.raises(ValueError): # no voxels or vertices - axes.BrainModelAxis('thalamus_left', affine=np.eye(4), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'thalamus_left', + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) with pytest.raises(ValueError): # incorrect voxel shape axes.BrainModelAxis( @@ -218,7 +233,9 @@ def test_brain_models(): ) bm_vertex = 
axes.BrainModelAxis( - 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20} + 'cortex_left', + vertex=np.ones(5, dtype=int), + nvertices={'cortex_left': 20}, ) assert np.array_equal(bm_vertex.name, ['CIFTI_STRUCTURE_CORTEX_LEFT'] * 5) assert np.array_equal(bm_vertex.vertex, np.full(5, 1)) @@ -227,11 +244,15 @@ def test_brain_models(): axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int)) with pytest.raises(ValueError): axes.BrainModelAxis( - 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_right': 20} + 'cortex_left', + vertex=np.ones(5, dtype=int), + nvertices={'cortex_right': 20}, ) with pytest.raises(ValueError): axes.BrainModelAxis( - 'cortex_left', vertex=-np.ones(5, dtype=int), nvertices={'cortex_left': 20} + 'cortex_left', + vertex=-np.ones(5, dtype=int), + nvertices={'cortex_left': 20}, ) # test from_mask errors @@ -244,7 +265,10 @@ def test_brain_models(): # tests error in adding together or combining as ParcelsAxis bm_vox = axes.BrainModelAxis( - 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), ) bm_vox + bm_vox assert (bm_vertex + bm_vox)[: bm_vertex.size] == bm_vertex @@ -289,7 +313,10 @@ def test_brain_models(): # test equalities bm_vox = axes.BrainModelAxis( - 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), ) bm_other = deepcopy(bm_vox) assert bm_vox == bm_other diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index 98d97e34e2..bf287b8e03 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -79,10 +79,7 @@ def test_cifti2_metadata(): with pytest.raises(KeyError): md.difference_update({'a': 'aval', 'd': 'dval'}) - assert ( - md.to_xml().decode('utf-8') - == 'bbval' - ) + assert md.to_xml() == b'bbval' def test__float_01(): @@ -195,8 +192,7 @@ def test_cifti2_parcel(): assert len(pl.vertices) == 0 assert ( - pl.to_xml().decode('utf-8') - == '1 2 3' + pl.to_xml() == b'1 2 3' ) @@ -207,7 +203,7 @@ def test_cifti2_vertices(): vs.brain_structure = 'CIFTI_STRUCTURE_OTHER' - assert vs.to_xml().decode('utf-8') == '' + assert vs.to_xml() == b'' assert len(vs) == 0 vs.extend(np.array([0, 1, 2])) @@ -217,10 +213,7 @@ def test_cifti2_vertices(): with pytest.raises(ValueError): vs.insert(1, 'a') - assert ( - vs.to_xml().decode('utf-8') - == '0 1 2' - ) + assert vs.to_xml() == b'0 1 2' vs[0] = 10 assert vs[0] == 10 @@ -254,7 +247,7 @@ def test_cifti2_vertexindices(): vi.to_xml() vi.extend(np.array([0, 1, 2])) assert len(vi) == 3 - assert vi.to_xml().decode('utf-8') == '0 1 2' + assert vi.to_xml() == b'0 1 2' with pytest.raises(ValueError): vi[0] = 'a' diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 7315a0d1f2..8d393686dd 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -11,6 +11,7 @@ from os.path import dirname from os.path import join as pjoin +import numpy as np import pytest from numpy.testing import assert_array_almost_equal from packaging.version import Version @@ -249,12 +250,17 @@ def test_read_geometry(): assert from_file.voxel_indices_ijk[-1] == expected[3] assert current_index == img.shape[1] - expected_affine = [[-2, 0, 0, 90], [0, 2, 0, -126], [0, 
0, 2, -72], [0, 0, 0, 1]] + expected_affine = [ + [-2, 0, 0, 90], + [0, 2, 0, -126], + [0, 0, 2, -72], + [0, 0, 0, 1], + ] expected_dimensions = (91, 109, 91) - assert ( - geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix - == expected_affine - ).all() + assert np.array_equal( + geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix, + expected_affine, + ) assert geometry_mapping.volume.volume_dimensions == expected_dimensions diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index 84f1376f1f..0f90b822da 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -20,14 +20,27 @@ suppress_warnings, ) -affine = [[-1.5, 0, 0, 90], [0, 1.5, 0, -85], [0, 0, 1.5, -71], [0, 0, 0, 1.0]] +affine = [ + [-1.5, 0, 0, 90], + [0, 1.5, 0, -85], + [0, 0, 1.5, -71], + [0, 0, 0, 1.0], +] dimensions = (120, 83, 78) number_of_vertices = 30000 brain_models = [ - ('CIFTI_STRUCTURE_THALAMUS_LEFT', [[60, 60, 60], [61, 59, 60], [61, 60, 59], [80, 90, 92]]), + ( + 'CIFTI_STRUCTURE_THALAMUS_LEFT', + [ + [60, 60, 60], + [61, 59, 60], + [61, 60, 59], + [80, 90, 92], + ], + ), ('CIFTI_STRUCTURE_CORTEX_LEFT', [0, 1000, 1301, 19972, 27312]), ('CIFTI_STRUCTURE_CORTEX_RIGHT', [207]), ] @@ -107,7 +120,17 @@ def check_geometry_map(mapping): parcels = [ - ('volume_parcel', ([[60, 60, 60], [61, 59, 60], [61, 60, 59], [80, 90, 92]],)), + ( + 'volume_parcel', + ( + [ + [60, 60, 60], + [61, 59, 60], + [61, 60, 59], + [80, 90, 92], + ], + ), + ), ( 'surface_parcel', ( @@ -117,7 +140,13 @@ def check_geometry_map(mapping): ), ( 'mixed_parcel', - ([[71, 81, 39], [53, 21, 91]], ('CIFTI_STRUCTURE_CORTEX_LEFT', [71, 88, 999])), + ( + [ + [71, 81, 39], + [53, 21, 91], + ], + ('CIFTI_STRUCTURE_CORTEX_LEFT', [71, 88, 999]), + ), ), ('single_element', ([[71, 81, 39]], ('CIFTI_STRUCTURE_CORTEX_LEFT', [40]))), ] @@ -196,9 +225,19 @@ def check_scalar_map(mapping): ( 'first_name', {'meta_key': 'some_metadata'}, - {0: ('label0', (0.1, 0.3, 0.2, 0.5)), 1: ('new_label', (0.5, 0.3, 0.1, 0.4))}, + { + 0: ('label0', (0.1, 0.3, 0.2, 0.5)), + 1: ('new_label', (0.5, 0.3, 0.1, 0.4)), + }, + ), + ( + 'another name', + {}, + { + 0: ('???', (0, 0, 0, 0)), + 1: ('great region', (0.4, 0.1, 0.23, 0.15)), + }, ), - ('another name', {}, {0: ('???', (0, 0, 0, 0)), 1: ('great region', (0.4, 0.1, 0.23, 0.15))}), ] @@ -463,7 +502,7 @@ def test_pconnseries(): hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 13) img = ci.Cifti2Image(data, hdr) - img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' 'PARCELLATED_SERIES') + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SERIES') with InTemporaryDirectory(): ci.save(img, 'test.pconnseries.nii') @@ -486,7 +525,7 @@ def test_pconnscalar(): hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 2) img = ci.Cifti2Image(data, hdr) - img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' 'PARCELLATED_SCALAR') + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SCALAR') with InTemporaryDirectory(): ci.save(img, 'test.pconnscalar.nii') diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 5ca691ad64..799e17f645 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -51,7 +51,7 @@ def get_opt_parser(): '--header-fields', dest='header_fields', default='all', - help='Header fields (comma separated) to be printed as well' ' (if present)', + help='Header fields 
(comma separated) to be printed as well (if present)', ), Option( '--ma', @@ -59,7 +59,7 @@ def get_opt_parser(): dest='data_max_abs_diff', type=float, default=0.0, - help='Maximal absolute difference in data between files' ' to tolerate.', + help='Maximal absolute difference in data between files to tolerate.', ), Option( '--mr', diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index c78c0910bf..4f504910a2 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -20,7 +20,7 @@ import nibabel.cmdline.utils from nibabel.cmdline.utils import _err, ap, safe_get, table2string, verbose -__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' 'and NiBabel contributors' +__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko and NiBabel contributors' __license__ = 'MIT' diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py index 64f02694ee..103bbf2640 100644 --- a/nibabel/cmdline/nifti_dx.py +++ b/nibabel/cmdline/nifti_dx.py @@ -15,7 +15,7 @@ import nibabel as nib __author__ = 'Matthew Brett' -__copyright__ = 'Copyright (c) 2011-18 Matthew Brett ' 'and NiBabel contributors' +__copyright__ = 'Copyright (c) 2011-18 Matthew Brett and NiBabel contributors' __license__ = 'MIT' diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index d6d3d6afe7..c04a6e0196 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -44,10 +44,7 @@ def get_opt_parser(): type='string', dest='outdir', default=None, - help=one_line( - """Destination directory for NIfTI files. - Default: current directory.""" - ), + help='Destination directory for NIfTI files. Default: current directory.', ) ) p.add_option( @@ -81,10 +78,7 @@ def get_opt_parser(): action='store_true', dest='bvs', default=False, - help=one_line( - """Output bvals/bvecs files in addition to NIFTI - image.""" - ), + help='Output bvals/bvecs files in addition to NIFTI image.', ) ) p.add_option( @@ -207,7 +201,7 @@ def get_opt_parser(): default=False, help=one_line( """Do not discard the diagnostic Philips DTI - trace volume, if it exists in the data.""" + trace volume, if it exists in the data.""" ), ) ) @@ -217,10 +211,7 @@ def get_opt_parser(): action='store_true', dest='overwrite', default=False, - help=one_line( - """Overwrite file if it exists. Default: - False""" - ), + help='Overwrite file if it exists. Default: False', ) ) p.add_option( @@ -300,7 +291,14 @@ def proc_file(infile, opts): out_dtype = np.float64 # Reorient data block to LAS+ if necessary ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine)) - if np.all(ornt == [[0, 1], [1, 1], [2, 1]]): # already in LAS+ + if np.array_equal( + ornt, + [ + [0, 1], + [1, 1], + [2, 1], + ], + ): # already in LAS+ t_aff = np.eye(4) else: # Not in LAS+ t_aff = inv_ornt_aff(ornt, pr_img.shape) @@ -431,6 +429,6 @@ def main(): errs.append(f'{infile}: {e}') if len(errs): - error('Caught %i exceptions. Dump follows:\n\n %s' % (len(errs), '\n'.join(errs)), 1) + error(f'Caught {len(errs)} exceptions. 
Dump follows:\n\n' + '\n'.join(errs), 1) else: verbose('Done') diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 5f531769a9..6d2e6953fb 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -5,7 +5,6 @@ Test running scripts """ -from collections import OrderedDict from io import StringIO from os.path import join as pjoin @@ -58,134 +57,87 @@ def get_test(self): def test_get_headers_diff(): fnames = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) - expected_difference = OrderedDict( - [ - ('regular', [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))]), - ( - 'dim_info', - [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], - ), - ( - 'dim', - [ - np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), - ], - ), - ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), - ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), - ( - 'pixdim', - [ - np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), - np.array( - [ - -1.00000000e00, - 2.00000000e00, - 2.00000000e00, - 2.19999909e00, - 2.00000000e03, - 1.00000000e00, - 1.00000000e00, - 1.00000000e00, - ] - ).astype(dtype='float32'), - ], - ), - ('slice_end', [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')]), - ( - 'xyzt_units', - [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], - ), - ( - 'cal_max', - [ - np.array(0.0).astype(dtype='float32'), - np.asarray(1162.0).astype(dtype='float32'), - ], - ), - ( - 'descrip', - [ - np.array(''.encode('utf-8')).astype(dtype='S80'), - np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( - dtype='S80' - ), - ], - ), - ('qform_code', [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), - ('sform_code', [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), - ( - 'quatern_b', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-1.9451068140294884e-26).astype(dtype='float32'), - ], - ), - ( - 'quatern_c', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.9967085123062134).astype(dtype='float32'), - ], - ), - ( - 'quatern_d', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.0810687392950058).astype(dtype='float32'), - ], - ), - ( - 'qoffset_x', - [ - np.array(0.0).astype(dtype='float32'), - np.array(117.8551025390625).astype(dtype='float32'), - ], - ), - ( - 'qoffset_y', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-35.72294235229492).astype(dtype='float32'), - ], - ), - ( - 'qoffset_z', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-7.248798370361328).astype(dtype='float32'), - ], - ), - ( - 'srow_x', - [ - np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), - np.array( - [-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02] - ).astype(dtype='float32'), - ], - ), - ( - 'srow_y', - [ - np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), - np.array( - [-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01] - ).astype(dtype='float32'), - ], - ), - ( - 'srow_z', + expected_difference = { + 'regular': [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))], + 'dim_info': [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + 'dim': [ + 
np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + ], + 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], + 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'pixdim': [ + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array( [ - np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), - np.array( - [8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00] - ).astype(dtype='float32'), - ], - ), - ] - ) + -1.00000000e00, + 2.00000000e00, + 2.00000000e00, + 2.19999909e00, + 2.00000000e03, + 1.00000000e00, + 1.00000000e00, + 1.00000000e00, + ] + ).astype(dtype='float32'), + ], + 'slice_end': [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')], + 'xyzt_units': [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + 'cal_max': [ + np.array(0.0).astype(dtype='float32'), + np.asarray(1162.0).astype(dtype='float32'), + ], + 'descrip': [ + np.array(''.encode('utf-8')).astype(dtype='S80'), + np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( + dtype='S80' + ), + ], + 'qform_code': [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'sform_code': [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'quatern_b': [ + np.array(0.0).astype(dtype='float32'), + np.array(-1.9451068140294884e-26).astype(dtype='float32'), + ], + 'quatern_c': [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.9967085123062134).astype(dtype='float32'), + ], + 'quatern_d': [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.0810687392950058).astype(dtype='float32'), + ], + 'qoffset_x': [ + np.array(0.0).astype(dtype='float32'), + np.array(117.8551025390625).astype(dtype='float32'), + ], + 'qoffset_y': [ + np.array(0.0).astype(dtype='float32'), + np.array(-35.72294235229492).astype(dtype='float32'), + ], + 'qoffset_z': [ + np.array(0.0).astype(dtype='float32'), + np.array(-7.248798370361328).astype(dtype='float32'), + ], + 'srow_x': [ + np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), + np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02]).astype( + dtype='float32' + ), + ], + 'srow_y': [ + np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), + np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01]).astype( + dtype='float32' + ), + ], + 'srow_z': [ + np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00]).astype( + dtype='float32' + ), + ], + } np.testing.assert_equal(actual_difference, expected_difference) @@ -193,25 +145,22 @@ def test_get_headers_diff(): def test_display_diff(): bogus_names = ['hellokitty.nii.gz', 'privettovarish.nii.gz'] - dict_values = OrderedDict( - [ - ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), - ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), - ] - ) - - expected_output = ( - 'These files are different.\n' + 'Field/File 1:hellokitty.nii.gz' - ' ' - '2:privettovarish.nii.gz \n' - 'datatype ' - '2 ' - '4 \n' - 'bitpix ' - '8 16' - ' ' - '\n' - ) + dict_values = { + 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], + 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + } + + expected_output = """\ +These files are different. 
+Field/File \ +1:hellokitty.nii.gz \ +2:privettovarish.nii.gz \n\ +datatype \ +2 \ +4 \n\ +bitpix \ +8 \ +16 \n""" assert display_diff(bogus_names, dict_values) == expected_output @@ -229,43 +178,33 @@ def test_get_data_diff(): test_array_5 = np.arange(64).reshape(8, 8) # same shape, 2 files - assert get_data_diff([test_array, test_array_2]) == OrderedDict( - [('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])] - ) + assert get_data_diff([test_array, test_array_2]) == { + 'DATA(diff 1:)': [None, {'abs': 1, 'rel': 2.0}] + } # same shape, 3 files - assert get_data_diff([test_array, test_array_2, test_array_3]) == OrderedDict( - [ - ( - 'DATA(diff 1:)', - [ - None, - OrderedDict([('abs', 1), ('rel', 2.0)]), - OrderedDict([('abs', 2), ('rel', 2.0)]), - ], - ), - ( - 'DATA(diff 2:)', - [None, None, OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])], - ), - ] - ) + assert get_data_diff([test_array, test_array_2, test_array_3]) == { + 'DATA(diff 1:)': [ + None, + {'abs': 1, 'rel': 2.0}, + {'abs': 2, 'rel': 2.0}, + ], + 'DATA(diff 2:)': [None, None, {'abs': 1, 'rel': 0.66666666666666663}], + } # same shape, 2 files, modified maximum abs/rel - assert get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2) == OrderedDict() + assert get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2) == {} # different shape, 2 files - assert get_data_diff([test_array_2, test_array_4]) == OrderedDict( - [('DATA(diff 1:)', [None, {'CMP': 'incompat'}])] - ) + assert get_data_diff([test_array_2, test_array_4]) == { + 'DATA(diff 1:)': [None, {'CMP': 'incompat'}] + } # different shape, 3 files - assert get_data_diff([test_array_4, test_array_5, test_array_2]) == OrderedDict( - [ - ('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), - ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}]), - ] - ) + assert get_data_diff([test_array_4, test_array_5, test_array_2]) == { + 'DATA(diff 1:)': [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}], + 'DATA(diff 2:)': [None, None, {'CMP': 'incompat'}], + } test_return = get_data_diff([test_array, test_array_2], dtype=np.float32) assert type(test_return['DATA(diff 1:)'][1]['abs']) is np.float32 @@ -280,138 +219,88 @@ def test_get_data_diff(): def test_main(): test_names = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] - expected_difference = OrderedDict( - [ - ('regular', [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))]), - ( - 'dim_info', - [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], - ), - ( - 'dim', + expected_difference = { + 'regular': [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))], + 'dim_info': [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + 'dim': [ + np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + ], + 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], + 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'pixdim': [ + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array( [ - np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), - ], - ), - ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), - ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), - ( - 'pixdim', - [ - np.array([1.0, 1.0, 3.0, 2.0, 
1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), - np.array( - [ - -1.00000000e00, - 2.00000000e00, - 2.00000000e00, - 2.19999909e00, - 2.00000000e03, - 1.00000000e00, - 1.00000000e00, - 1.00000000e00, - ] - ).astype(dtype='float32'), - ], - ), - ('slice_end', [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')]), - ( - 'xyzt_units', - [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], - ), - ( - 'cal_max', - [ - np.array(0.0).astype(dtype='float32'), - np.asarray(1162.0).astype(dtype='float32'), - ], - ), - ( - 'descrip', - [ - np.array(''.encode('utf-8')).astype(dtype='S80'), - np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( - dtype='S80' - ), - ], - ), - ('qform_code', [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), - ('sform_code', [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), - ( - 'quatern_b', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-1.9451068140294884e-26).astype(dtype='float32'), - ], - ), - ( - 'quatern_c', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.9967085123062134).astype(dtype='float32'), - ], - ), - ( - 'quatern_d', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.0810687392950058).astype(dtype='float32'), - ], - ), - ( - 'qoffset_x', - [ - np.array(0.0).astype(dtype='float32'), - np.array(117.8551025390625).astype(dtype='float32'), - ], - ), - ( - 'qoffset_y', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-35.72294235229492).astype(dtype='float32'), - ], - ), - ( - 'qoffset_z', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-7.248798370361328).astype(dtype='float32'), - ], - ), - ( - 'srow_x', - [ - np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), - np.array( - [-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02] - ).astype(dtype='float32'), - ], - ), - ( - 'srow_y', - [ - np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), - np.array( - [-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01] - ).astype(dtype='float32'), - ], - ), - ( - 'srow_z', - [ - np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), - np.array( - [8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00] - ).astype(dtype='float32'), - ], - ), - ( - 'DATA(md5)', - ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'], - ), - ] - ) + -1.00000000e00, + 2.00000000e00, + 2.00000000e00, + 2.19999909e00, + 2.00000000e03, + 1.00000000e00, + 1.00000000e00, + 1.00000000e00, + ] + ).astype(dtype='float32'), + ], + 'slice_end': [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')], + 'xyzt_units': [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + 'cal_max': [ + np.array(0.0).astype(dtype='float32'), + np.asarray(1162.0).astype(dtype='float32'), + ], + 'descrip': [ + np.array(''.encode('utf-8')).astype(dtype='S80'), + np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( + dtype='S80' + ), + ], + 'qform_code': [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'sform_code': [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'quatern_b': [ + np.array(0.0).astype(dtype='float32'), + np.array(-1.9451068140294884e-26).astype(dtype='float32'), + ], + 'quatern_c': [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.9967085123062134).astype(dtype='float32'), + ], + 'quatern_d': [ + np.array(0.0).astype(dtype='float32'), + 
np.array(-0.0810687392950058).astype(dtype='float32'), + ], + 'qoffset_x': [ + np.array(0.0).astype(dtype='float32'), + np.array(117.8551025390625).astype(dtype='float32'), + ], + 'qoffset_y': [ + np.array(0.0).astype(dtype='float32'), + np.array(-35.72294235229492).astype(dtype='float32'), + ], + 'qoffset_z': [ + np.array(0.0).astype(dtype='float32'), + np.array(-7.248798370361328).astype(dtype='float32'), + ], + 'srow_x': [ + np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), + np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02]).astype( + dtype='float32' + ), + ], + 'srow_y': [ + np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), + np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01]).astype( + dtype='float32' + ), + ], + 'srow_z': [ + np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00]).astype( + dtype='float32' + ), + ], + 'DATA(md5)': ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'], + } with pytest.raises(SystemExit): np.testing.assert_equal(main(test_names, StringIO()), expected_difference) diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index 41b10d6b31..8e9d45251e 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -64,7 +64,7 @@ def table2string(table, out=None): atable = np.asarray(table) # eat whole entry while computing width for @w (for wide) markup_strip = re.compile('^@([lrc]|w.*)') - col_width = [max([len(markup_strip.sub('', x)) for x in column]) for column in atable.T] + col_width = [max(len(markup_strip.sub('', x)) for x in column) for column in atable.T] string = '' for i, table_ in enumerate(table): string_ = '' diff --git a/nibabel/data.py b/nibabel/data.py index eaa6e77acf..42826d2f67 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -13,9 +13,7 @@ from .environment import get_nipy_system_dir, get_nipy_user_dir -DEFAULT_INSTALL_HINT = ( - 'If you have the package, have you set the ' 'path to the package correctly?' -) +DEFAULT_INSTALL_HINT = 'If you have the package, have you set the path to the package correctly?' class DataError(Exception): @@ -135,7 +133,7 @@ def __init__(self, base_path, config_filename=None): version_parts = self.version.split('.') self.major_version = int(version_parts[0]) self.minor_version = int(version_parts[1]) - self.version_no = float('%d.%d' % (self.major_version, self.minor_version)) + self.version_no = float(f'{self.major_version}.{self.minor_version}') def _cfg_value(fname, section='DATA', value='path'): diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 054bba5272..64ef906820 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -48,10 +48,8 @@ def dataobj(self): return self._dataobj @deprecate_with_version( - 'get_data() is deprecated in favor of get_fdata(),' - ' which has a more predictable return type. To ' - 'obtain get_data() behavior going forward, use ' - 'numpy.asanyarray(img.dataobj).', + 'get_data() is deprecated in favor of get_fdata(), which has a more predictable return ' + 'type. 
To obtain get_data() behavior going forward, use numpy.asanyarray(img.dataobj).', '3.0', '5.0', ) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 03d3f26a74..d151465933 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -822,7 +822,7 @@ def __init__(self, dataobj, affine, header, subheader, mlist, extra=None, file_m def affine(self): if not self._subheader._check_affines(): warnings.warn( - 'Affines different across frames, loading affine ' 'from FIRST frame', UserWarning + 'Affines different across frames, loading affine from FIRST frame', UserWarning ) return self._affine @@ -893,7 +893,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # Get affine if not subheaders._check_affines(): warnings.warn( - 'Affines different across frames, loading affine ' 'from FIRST frame', UserWarning + 'Affines different across frames, loading affine from FIRST frame', UserWarning ) aff = subheaders.get_frame_affine() img = klass(data, aff, header, subheaders, mlist, extra=None, file_map=file_map) @@ -1010,7 +1010,7 @@ def to_file_map(self, file_map=None): @classmethod def from_image(klass, img): - raise NotImplementedError('Ecat images can only be generated ' 'from file objects') + raise NotImplementedError('Ecat images can only be generated from file objects') @classmethod def load(klass, filespec): diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index eee822566b..e37a698f2f 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -35,7 +35,7 @@ def from_header(klass, header=None): if type(header) == klass: return header.copy() raise NotImplementedError( - 'Header class requires a conversion ' f'from {klass} to {type(header)}' + f'Header class requires a conversion from {klass} to {type(header)}' ) @classmethod diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index 42e89fa721..77949a6791 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -114,7 +114,7 @@ def types_filenames( """ template_fname = _stringify_path(template_fname) if not isinstance(template_fname, str): - raise TypesFilenamesError('Need file name as input ' 'to set_filenames') + raise TypesFilenamesError('Need file name as input to set_filenames') if template_fname.endswith('.'): template_fname = template_fname[:-1] filename, found_ext, ignored, guessed_name = parse_filename( diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index 75da3ff85f..87cac05a4a 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -104,7 +104,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True): if slicer == Ellipsis: remaining = sliceobj[i + 1 :] if Ellipsis in remaining: - raise ValueError('More than one Ellipsis in slicing ' 'expression') + raise ValueError('More than one Ellipsis in slicing expression') real_remaining = [r for r in remaining if r is not None] n_ellided = n_dim - n_real - len(real_remaining) can_slicers.extend((slice(None),) * n_ellided) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 6358a6af81..1091bedbcb 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -55,46 +55,10 @@ # caveat: Note that it's ambiguous to get the code given the bytespervoxel # caveat 2: Note that the bytespervox you get is in str ( not an int) _dtdefs = ( # code, conversion function, dtype, bytes per voxel - ( - 0, - 'uint8', - '>u1', - '1', - 'MRI_UCHAR', - np.uint8, - np.dtype(np.uint8), - np.dtype(np.uint8).newbyteorder('>'), - ), - ( - 4, - 'int16', - '>i2', 
- '2', - 'MRI_SHORT', - np.int16, - np.dtype(np.int16), - np.dtype(np.int16).newbyteorder('>'), - ), - ( - 1, - 'int32', - '>i4', - '4', - 'MRI_INT', - np.int32, - np.dtype(np.int32), - np.dtype(np.int32).newbyteorder('>'), - ), - ( - 3, - 'float', - '>f4', - '4', - 'MRI_FLOAT', - np.float32, - np.dtype(np.float32), - np.dtype(np.float32).newbyteorder('>'), - ), + (0, 'uint8', '>u1', '1', 'MRI_UCHAR', np.uint8, np.dtype('u1'), np.dtype('>u1')), + (4, 'int16', '>i2', '2', 'MRI_SHORT', np.int16, np.dtype('i2'), np.dtype('>i2')), + (1, 'int32', '>i4', '4', 'MRI_INT', np.int32, np.dtype('i4'), np.dtype('>i4')), + (3, 'float', '>f4', '4', 'MRI_FLOAT', np.float32, np.dtype('f4'), np.dtype('>f4')), ) # make full code alias bank, including dtype column @@ -233,7 +197,12 @@ def get_vox2ras_tkr(self): ds = self._structarr['delta'] ns = self._structarr['dims'][:3] * ds / 2.0 v2rtkr = np.array( - [[-ds[0], 0, 0, ns[0]], [0, 0, ds[2], -ns[2]], [0, -ds[1], 0, ns[1]], [0, 0, 0, 1]], + [ + [-ds[0], 0, 0, ns[0]], + [0, 0, ds[2], -ns[2]], + [0, -ds[1], 0, ns[1]], + [0, 0, 0, 1], + ], dtype=np.float32, ) return v2rtkr @@ -312,7 +281,7 @@ def set_zooms(self, zooms): raise HeaderDataError('Expecting %d zoom values' % ndims) if np.any(zooms[:3] <= 0): raise HeaderDataError( - 'Spatial (first three) zooms must be positive; got ' f'{tuple(zooms[:3])}' + f'Spatial (first three) zooms must be positive; got {tuple(zooms[:3])}' ) hdr['delta'] = zooms[:3] if len(zooms) == 4: @@ -474,7 +443,7 @@ def as_byteswapped(self, endianness=None): """ if endianness is None or endian_codes[endianness] != '>': - raise ValueError('Cannot byteswap MGHHeader - ' 'must always be big endian') + raise ValueError('Cannot byteswap MGHHeader - must always be big endian') return self.copy() @classmethod diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index ee0ed50fec..0a850488c2 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -32,11 +32,22 @@ # sample voxel to ras matrix (mri_info --vox2ras) v2r = np.array( - [[1, 2, 3, -13], [2, 3, 1, -11.5], [3, 1, 2, -11.5], [0, 0, 0, 1]], dtype=np.float32 + [ + [1, 2, 3, -13], + [2, 3, 1, -11.5], + [3, 1, 2, -11.5], + [0, 0, 0, 1], + ], + dtype=np.float32, ) # sample voxel to ras - tkr matrix (mri_info --vox2ras-tkr) v2rtkr = np.array( - [[-1.0, 0.0, 0.0, 1.5], [0.0, 0.0, 1.0, -2.5], [0.0, -1.0, 0.0, 2.0], [0.0, 0.0, 0.0, 1.0]], + [ + [-1.0, 0.0, 0.0, 1.5], + [0.0, 0.0, 1.0, -2.5], + [0.0, -1.0, 0.0, 2.0], + [0.0, 0.0, 0.0, 1.0], + ], dtype=np.float32, ) @@ -145,7 +156,13 @@ def test_set_zooms(): assert_array_almost_equal(h.get_zooms(), [1, 1, 1, 2]) h.set_zooms([1, 1, 1, 3]) assert_array_almost_equal(h.get_zooms(), [1, 1, 1, 3]) - for zooms in ((-1, 1, 1, 1), (1, -1, 1, 1), (1, 1, -1, 1), (1, 1, 1, -1), (1, 1, 1, 1, 5)): + for zooms in ( + (-1, 1, 1, 1), + (1, -1, 1, 1), + (1, 1, -1, 1), + (1, 1, 1, -1), + (1, 1, 1, 1, 5), + ): with pytest.raises(HeaderDataError): h.set_zooms(zooms) # smoke test for tr=0 diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 7313f984f2..dc205d8004 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -108,7 +108,7 @@ def _sanitize(args, kwargs): @property @deprecate_with_version( - 'The data attribute is deprecated. Use GiftiMetaData object ' 'directly as a dict.', + 'The data attribute is deprecated. 
Use GiftiMetaData object directly as a dict.', '4.0', '6.0', ) @@ -147,7 +147,7 @@ class GiftiNVPairs: """ @deprecate_with_version( - 'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object ' 'as a dict, instead.', + 'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object as a dict, instead.', '4.0', '6.0', ) @@ -834,11 +834,10 @@ def _to_xml_element(self): def to_xml(self, enc='utf-8'): """Return XML corresponding to image content""" - return b""" + header = b""" -""" + xml.XmlSerializable.to_xml( - self, enc - ) +""" + return header + super().to_xml(enc) # Avoid the indirection of going through to_file_map to_bytes = to_xml diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 88c63b5600..68dfb00af8 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -64,7 +64,7 @@ def read_data_block(darray, fname, data, mmap): ``numpy.ndarray`` or ``numpy.memmap`` containing the parsed data """ if mmap not in (True, False, 'c', 'r', 'r+'): - raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") + raise ValueError("mmap value should be one of True, False, 'c', 'r', 'r+'") if mmap is True: mmap = 'c' enclabel = gifti_encoding_codes.label[darray.encoding] @@ -85,7 +85,7 @@ def read_data_block(darray, fname, data, mmap): if enclabel == 'External': if fname is None: raise GiftiParseError( - 'ExternalFileBinary is not supported ' 'when loading from in-memory XML' + 'ExternalFileBinary is not supported when loading from in-memory XML' ) ext_fname = op.join(op.dirname(fname), darray.ext_fname) if not op.exists(ext_fname): diff --git a/nibabel/gifti/util.py b/nibabel/gifti/util.py index 7659ee33cc..9393292013 100644 --- a/nibabel/gifti/util.py +++ b/nibabel/gifti/util.py @@ -13,7 +13,11 @@ KIND2FMT = {'i': '%i', 'u': '%i', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} array_index_order_codes = Recoder( - ((1, 'RowMajorOrder', 'C'), (2, 'ColumnMajorOrder', 'F')), fields=('code', 'label', 'npcode') + ( + (1, 'RowMajorOrder', 'C'), + (2, 'ColumnMajorOrder', 'F'), + ), + fields=('code', 'label', 'npcode'), ) gifti_encoding_codes = Recoder( diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index f64f3e8230..6c1981ca77 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -198,7 +198,9 @@ def save(img, filename, **kwargs): @deprecate_with_version( - 'read_img_data deprecated. ' 'Please use ``img.dataobj.get_unscaled()`` instead.', '3.2', '5.0' + 'read_img_data deprecated. 
Please use ``img.dataobj.get_unscaled()`` instead.', + '3.2', + '5.0', ) def read_img_data(img, prefer='scaled'): """Read data from image associated with files diff --git a/nibabel/minc1.py b/nibabel/minc1.py index d6d2d3081b..fb183277bc 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -87,7 +87,7 @@ def get_data_shape(self): def get_zooms(self): """Get real-world sizes of voxels""" # zooms must be positive; but steps in MINC can be negative - return tuple([abs(float(dim.step)) if hasattr(dim, 'step') else 1.0 for dim in self._dims]) + return tuple(abs(float(dim.step)) if hasattr(dim, 'step') else 1.0 for dim in self._dims) def get_affine(self): nspatial = len(self._spatial_dims) @@ -127,7 +127,7 @@ def _get_valid_range(self): except AttributeError: valid_range = [info.min, info.max] if valid_range[0] < info.min or valid_range[1] > info.max: - raise ValueError('Valid range outside input ' 'data type range') + raise ValueError('Valid range outside input data type range') return np.asarray(valid_range, dtype=np.float64) def _get_scalar(self, var): @@ -170,7 +170,7 @@ def _normalize(self, data, sliceobj=()): mx_dims = self._get_dimensions(image_max) mn_dims = self._get_dimensions(image_min) if mx_dims != mn_dims: - raise MincError('"image-max" and "image-min" do not have the same' 'dimensions') + raise MincError('"image-max" and "image-min" do not have the same dimensions') nscales = len(mx_dims) if nscales > 2: raise MincError('More than two scaling dimensions') diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 9638ced5ee..1fffae0c86 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -97,7 +97,7 @@ def _get_valid_range(self): valid_range = [info.min, info.max] else: if valid_range[0] < info.min or valid_range[1] > info.max: - raise ValueError('Valid range outside input ' 'data type range') + raise ValueError('Valid range outside input data type range') return np.asarray(valid_range, dtype=np.float64) def _get_scalar(self, var): diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 376dcb5b5a..961e93ecbb 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -99,8 +99,8 @@ def read(csa_str): csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I') if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS: raise CSAReadError( - 'Number of tags `t` should be ' - '0 < t <= %d. Instead found %d tags.' % (MAX_CSA_ITEMS, csa_dict['n_tags']) + f'Number of tags `t` should be 0 < t <= {MAX_CSA_ITEMS}. ' + f'Instead found {csa_dict["n_tags"]} tags.' 
) for tag_no in range(csa_dict['n_tags']): name, vm, vr, syngodt, n_items, last3 = up_str.unpack('64si4s3i') @@ -138,7 +138,7 @@ def read(csa_str): else: # CSA2 item_len = x1 if (ptr + item_len) > csa_len: - raise CSAReadError('Item is too long, ' 'aborting read') + raise CSAReadError('Item is too long, aborting read') if item_no >= n_values: assert item_len == 0 continue diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index a3c49d7f10..113af967cc 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -170,9 +170,9 @@ def _third_pass(wrappers): '- slices are probably unsortable' ) if None in inos: - raise DicomReadError(msg_fmt % 'some or all slices with ' 'missing InstanceNumber') + raise DicomReadError(msg_fmt % 'some or all slices with missing InstanceNumber') if len(set(inos)) < len(inos): - raise DicomReadError(msg_fmt % 'some or all slices with ' 'the same InstanceNumber') + raise DicomReadError(msg_fmt % 'some or all slices with the same InstanceNumber') # sort by instance number wrappers.sort(key=_instance_sorter) # start loop, in which we start a new volume, each time we see a z diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index be070e8608..9290d6c376 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -84,9 +84,8 @@ def wrapper_from_data(dcm_data): csa = csar.get_csa_header(dcm_data) except csar.CSAReadError as e: warnings.warn( - 'Error while attempting to read CSA header: ' - + str(e.args) - + '\n Ignoring Siemens private (CSA) header info.' + f'Error while attempting to read CSA header: {e.args}\n' + 'Ignoring Siemens private (CSA) header info.' ) csa = None if csa is None: @@ -193,7 +192,7 @@ def rotation_matrix(self): # motivated in ``doc/source/notebooks/ata_error.ipynb``, and from # discussion at https://github.com/nipy/nibabel/pull/156 if not np.allclose(np.eye(3), np.dot(R, R.T), atol=5e-5): - raise WrapperPrecisionError('Rotation matrix not nearly ' 'orthogonal') + raise WrapperPrecisionError('Rotation matrix not nearly orthogonal') return R @one_time @@ -537,7 +536,7 @@ def image_shape(self): stack_ids = set(frame.FrameContentSequence[0].StackID for frame in self.frames) if len(stack_ids) > 1: raise WrapperError( - 'File contains more than one StackID. ' 'Cannot handle multi-stack files' + 'File contains more than one StackID. Cannot handle multi-stack files' ) # Determine if one of the dimension indices refers to the stack id dim_seq = [dim.DimensionIndexPointer for dim in self.get('DimensionIndexSequence')] @@ -551,9 +550,7 @@ def image_shape(self): # derived volume is included derived_tag = pydicom.datadict.tag_for_keyword('DiffusionBValue') if derived_tag not in dim_seq: - raise WrapperError( - 'Missing information, cannot remove indices ' 'with confidence.' 
- ) + raise WrapperError('Missing information, cannot remove indices with confidence.') derived_dim_idx = dim_seq.index(derived_tag) frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) # account for the 2 additional dimensions (row and column) not included @@ -568,7 +565,7 @@ def image_shape(self): shape = (rows, cols) + tuple(ns_unique) n_vols = np.prod(shape[3:]) if n_frames != n_vols * shape[2]: - raise WrapperError('Calculated shape does not match number of ' 'frames.') + raise WrapperError('Calculated shape does not match number of frames.') return tuple(shape) @one_time @@ -582,7 +579,7 @@ def image_orient_patient(self): try: iop = self.frames[0].PlaneOrientationSequence[0].ImageOrientationPatient except AttributeError: - raise WrapperError('Not enough information for ' 'image_orient_patient') + raise WrapperError('Not enough information for image_orient_patient') if iop is None: return None iop = np.array(list(map(float, iop))) @@ -833,9 +830,7 @@ def __init__(self, dcm_data, csa_header=None, n_mosaic=None): pass if n_mosaic is None or n_mosaic == 0: raise WrapperError( - 'No valid mosaic number in CSA ' - 'header; is this really ' - 'Siemens mosiac data?' + 'No valid mosaic number in CSA header; is this really Siemens mosiac data?' ) self.n_mosaic = n_mosaic self.mosaic_size = int(np.ceil(np.sqrt(n_mosaic))) @@ -848,8 +843,7 @@ def image_shape(self): cols = self.get('Columns') if None in (rows, cols): return None - mosaic_size = self.mosaic_size - return (int(rows / mosaic_size), int(cols / mosaic_size), self.n_mosaic) + return (rows // self.mosaic_size, cols // self.mosaic_size, self.n_mosaic) @one_time def image_position(self): diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index b7a60dfc3b..1e749aced1 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -54,7 +54,7 @@ def test_passing_kwds(): def test_slices_to_series(): - dicom_files = (pjoin(IO_DATA_PATH, '%d.dcm' % i) for i in range(2)) + dicom_files = (pjoin(IO_DATA_PATH, f'{i}.dcm') for i in range(2)) wrappers = [didr.wrapper_from_file(f) for f in dicom_files] series = didr.slices_to_series(wrappers) assert len(series) == 1 diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 0d28298313..392bf5c2ad 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -815,7 +815,7 @@ def get_data_shape(self): vec_len = int(self._structarr['glmin']) if vec_len == 0: raise HeaderDataError( - '-1 in dim[1] but 0 in glmin; ' 'inconsistent freesurfer type header?' + '-1 in dim[1] but 0 in glmin; inconsistent freesurfer type header?' 
) return (vec_len, 1, 1) + shape[3:] # Apply freesurfer hack for ico7 surface @@ -1095,7 +1095,7 @@ def set_qform(self, affine, code=None, strip_shears=True): P, S, Qs = npl.svd(R) PR = np.dot(P, Qs) if not strip_shears and not np.allclose(PR, R): - raise HeaderDataError('Shears in affine and `strip_shears` is ' 'False') + raise HeaderDataError('Shears in affine and `strip_shears` is False') # Convert to quaternion quat = mat2quat(PR) # Set into header @@ -1498,7 +1498,7 @@ def get_slice_duration(self): """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension must be set ' 'for duration to be valid') + raise HeaderDataError('Slice dimension must be set for duration to be valid') return float(self._structarr['slice_duration']) def set_slice_duration(self, duration): @@ -1515,20 +1515,20 @@ def set_slice_duration(self, duration): """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension must be set ' 'for duration to be valid') + raise HeaderDataError('Slice dimension must be set for duration to be valid') self._structarr['slice_duration'] = duration def get_n_slices(self): """Return the number of slices""" _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension not set in header ' 'dim_info') + raise HeaderDataError('Slice dimension not set in header dim_info') shape = self.get_data_shape() try: slice_len = shape[slice_dim] except IndexError: raise HeaderDataError( - f'Slice dimension index ({slice_dim}) ' f'outside shape tuple ({shape})' + f'Slice dimension index ({slice_dim}) outside shape tuple ({shape})' ) return slice_len @@ -1561,7 +1561,7 @@ def get_slice_times(self): duration = self.get_slice_duration() slabel = self.get_value_label('slice_code') if slabel == 'unknown': - raise HeaderDataError('Cannot get slice times when ' 'Slice code is "unknown"') + raise HeaderDataError('Cannot get slice times when slice code is "unknown"') slice_start, slice_end = (int(hdr['slice_start']), int(hdr['slice_end'])) if slice_start < 0: raise HeaderDataError('slice_start should be >= 0') @@ -1602,7 +1602,7 @@ def set_slice_times(self, slice_times): hdr = self._structarr slice_len = self.get_n_slices() if slice_len != len(slice_times): - raise HeaderDataError('Number of slice times does not ' 'match number of slices') + raise HeaderDataError('Number of slice times does not match number of slices') # Extract Nones at beginning and end. 
Check for others for ind, time in enumerate(slice_times): if time is not None: @@ -1617,12 +1617,12 @@ def set_slice_times(self, slice_times): timed = slice_times[slice_start : slice_end + 1] for time in timed: if time is None: - raise HeaderDataError('Cannot have None in middle ' 'of slice time vector') + raise HeaderDataError('Cannot have None in middle of slice time vector') # Find slice duration, check times are compatible with single # duration tdiffs = np.diff(np.sort(timed)) if not np.allclose(np.diff(tdiffs), 0): - raise HeaderDataError('Slice times not compatible with ' 'single slice duration') + raise HeaderDataError('Slice times not compatible with single slice duration') duration = np.mean(tdiffs) # To slice time order st_order = np.round(np.array(timed) / duration) @@ -1752,7 +1752,7 @@ def _chk_offset(hdr, fix=False): return hdr, rep if magic == hdr.single_magic and offset < hdr.single_vox_offset: rep.problem_level = 40 - rep.problem_msg = 'vox offset %d too low for ' 'single file nifti1' % offset + rep.problem_msg = 'vox offset %d too low for single file nifti1' % offset if fix: hdr['vox_offset'] = hdr.single_vox_offset rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}' diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 193e458c6b..01a918e445 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -211,7 +211,7 @@ def _chk_eol_check(hdr, fix=False): return hdr, rep rep.problem_level = 40 rep.problem_msg = ( - 'EOL check not 0 or 13, 10, 26, 10; data may be ' 'corrupted by EOL conversion' + 'EOL check not 0 or 13, 10, 26, 10; data may be corrupted by EOL conversion' ) if fix: hdr['eol_check'] = (13, 10, 26, 10) diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index c91ad0f1e8..d1eb9d17d5 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -105,7 +105,7 @@ def optional_package(name, trip_msg=None, min_version=None): trip_msg = f'These functions need {name} version >= {min_version}' if trip_msg is None: trip_msg = ( - f'We need package {name} for these functions, ' f'but ``import {name}`` raised {exc}' + f'We need package {name} for these functions, but ``import {name}`` raised {exc}' ) pkg = TripWire(trip_msg) diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 0adf19ca78..f9e1ea028c 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -154,10 +154,10 @@ def apply_orientation(arr, ornt): ornt = np.asarray(ornt) n = ornt.shape[0] if t_arr.ndim < n: - raise OrientationError('Data array has fewer dimensions than ' 'orientation') + raise OrientationError('Data array has fewer dimensions than orientation') # no coordinates can be dropped for applying the orientations if np.any(np.isnan(ornt[:, 0])): - raise OrientationError('Cannot drop coordinates when ' 'applying orientation to data') + raise OrientationError('Cannot drop coordinates when applying orientation to data') # apply ornt transformations for ax, flip in enumerate(ornt[:, 1]): if flip == -1: @@ -225,7 +225,11 @@ def inv_ornt_aff(ornt, shape): return np.dot(undo_flip, undo_reorder) -@deprecate_with_version('flip_axis is deprecated. ' 'Please use numpy.flip instead.', '3.2', '5.0') +@deprecate_with_version( + 'flip_axis is deprecated. 
Please use numpy.flip instead.',
+    '3.2',
+    '5.0',
+)
 def flip_axis(arr, axis=0):
     """Flip contents of `axis` in array `arr`
 
diff --git a/nibabel/parrec.py b/nibabel/parrec.py
index 04184117dc..81e956f2b8 100644
--- a/nibabel/parrec.py
+++ b/nibabel/parrec.py
@@ -141,16 +141,35 @@
 
 # PSL to RAS affine
 PSL_TO_RAS = np.array(
-    [[0, 0, -1, 0], [-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]  # L -> R  # P -> A  # S -> S
+    [
+        [0, 0, -1, 0],  # L -> R
+        [-1, 0, 0, 0],  # P -> A
+        [0, 1, 0, 0],  # S -> S
+        [0, 0, 0, 1],
+    ]
 )
 
 # Acquisition (tra/sag/cor) to PSL axes
 # These come from looking at transverse, sagittal, coronal datasets where we
 # can see the LR, PA, SI orientation of the slice axes from the scanned object
 ACQ_TO_PSL = dict(
-    transverse=np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]),  # P  # S  # L
+    transverse=np.array(
+        [
+            [0, 1, 0, 0],  # P
+            [0, 0, 1, 0],  # S
+            [1, 0, 0, 0],  # L
+            [0, 0, 0, 1],
+        ]
+    ),
     sagittal=np.diag([1, -1, -1, 1]),
-    coronal=np.array([[0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]),  # P  # S  # L
+    coronal=np.array(
+        [
+            [0, 0, 1, 0],  # P
+            [0, -1, 0, 0],  # S
+            [1, 0, 0, 0],  # L
+            [0, 0, 0, 1],
+        ]
+    ),
 )
 
 DEG2RAD = np.pi / 180.0
@@ -212,121 +231,55 @@
 image_def_dtds = {}
 image_def_dtds['V4'] = [
     ('slice number', int),
-    (
-        'echo number',
-        int,
-    ),
-    (
-        'dynamic scan number',
-        int,
-    ),
-    (
-        'cardiac phase number',
-        int,
-    ),
-    (
-        'image_type_mr',
-        int,
-    ),
-    (
-        'scanning sequence',
-        int,
-    ),
-    (
-        'index in REC file',
-        int,
-    ),
-    (
-        'image pixel size',
-        int,
-    ),
-    (
-        'scan percentage',
-        int,
-    ),
-    ('recon resolution', int, (2,)),
+    ('echo number', int),
+    ('dynamic scan number', int),
+    ('cardiac phase number', int),
+    ('image_type_mr', int),
+    ('scanning sequence', int),
+    ('index in REC file', int),
+    ('image pixel size', int),
+    ('scan percentage', int),
+    ('recon resolution', int, (2,)),
     ('rescale intercept', float),
     ('rescale slope', float),
     ('scale slope', float),
     # Window center, width recorded as integer but can be float
-    (
-        'window center',
-        float,
-    ),
-    (
-        'window width',
-        float,
-    ),
+    ('window center', float),
+    ('window width', float),
     ('image angulation', float, (3,)),
     ('image offcentre', float, (3,)),
     ('slice thickness', float),
     ('slice gap', float),
-    (
-        'image_display_orientation',
-        int,
-    ),
-    (
-        'slice orientation',
-        int,
-    ),
-    (
-        'fmri_status_indication',
-        int,
-    ),
-    (
-        'image_type_ed_es',
-        int,
-    ),
+    ('image_display_orientation', int),
+    ('slice orientation', int),
+    ('fmri_status_indication', int),
+    ('image_type_ed_es', int),
     ('pixel spacing', float, (2,)),
     ('echo_time', float),
     ('dyn_scan_begin_time', float),
     ('trigger_time', float),
     ('diffusion_b_factor', float),
-    (
-        'number of averages',
-        int,
-    ),
+    ('number of averages', int),
     ('image_flip_angle', float),
-    (
-        'cardiac frequency',
-        int,
-    ),
-    (
-        'minimum RR-interval',
-        int,
-    ),
-    (
-        'maximum RR-interval',
-        int,
-    ),
-    (
-        'TURBO factor',
-        int,
-    ),
+    ('cardiac frequency', int),
+    ('minimum RR-interval', int),
+    ('maximum RR-interval', int),
+    ('TURBO factor', int),
     ('Inversion delay', float),
 ]
 
 # Extra image def fields for 4.1 compared to 4
 image_def_dtds['V4.1'] = image_def_dtds['V4'] + [
-    (
-        'diffusion b value number',
-        int,
-    ),  # (imagekey!)
-    (
-        'gradient orientation number',
-        int,
-    ),  # (imagekey!)
-    ('contrast type', 'S30'),  # XXX might be too short?
-    ('diffusion anisotropy type', 'S30'),  # XXX might be too short?
+    ('diffusion b value number', int),  # (imagekey!)
+ ('gradient orientation number', int), # (imagekey!) + ('contrast type', 'S30'), # XXX might be too short? + ('diffusion anisotropy type', 'S30'), # XXX might be too short? ('diffusion', float, (3,)), ] # Extra image def fields for 4.2 compared to 4.1 image_def_dtds['V4.2'] = image_def_dtds['V4.1'] + [ - ( - 'label type', - int, - ), # (imagekey!) + ('label type', int), # (imagekey!) ] #: PAR header versions we claim to understand @@ -337,7 +290,12 @@ #: slice orientation codes slice_orientation_codes = Recoder( - ((1, 'transverse'), (2, 'sagittal'), (3, 'coronal')), fields=('code', 'label') # code, label + ( # code, label + (1, 'transverse'), + (2, 'sagittal'), + (3, 'coronal'), + ), + fields=('code', 'label'), ) @@ -804,7 +762,7 @@ def from_header(klass, header=None): raise PARRECError('Cannot create PARRECHeader from air.') if type(header) == klass: return header.copy() - raise PARRECError('Cannot create PARREC header from ' 'non-PARREC header.') + raise PARRECError('Cannot create PARREC header from non-PARREC header.') @classmethod def from_fileobj(klass, fileobj, permit_truncated=False, strict_sort=False): @@ -830,9 +788,7 @@ def as_analyze_map(self): f"{self.general_info['patient_name']};" f"{self.general_info['exam_date'].replace(' ', '')};" f"{self.general_info['protocol_name']}" - )[ - :80 - ] # max len + )[:80] is_fmri = self.general_info['max_dynamics'] > 1 # PAR/REC uses msec, but in _calc_zooms we convert to sec t = 'sec' if is_fmri else 'unknown' @@ -930,7 +886,7 @@ def _get_unique_image_prop(self, name): props = self.image_defs[name] if np.any(np.diff(props, axis=0)): raise PARRECError( - f'Varying {name} in image sequence ' f'({props}). This is not supported.' + f'Varying {name} in image sequence ({props}). This is not supported.' ) return props[0] diff --git a/nibabel/processing.py b/nibabel/processing.py index 669b416fb6..e3c9ae8214 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -114,7 +114,12 @@ def adapt_affine(affine, n_dim): def resample_from_to( - from_img, to_vox_map, order=3, mode='constant', cval=0.0, out_class=Nifti1Image + from_img, + to_vox_map, + order=3, + mode='constant', + cval=0.0, + out_class=Nifti1Image, ): """Resample image `from_img` to mapped voxel space `to_vox_map` @@ -155,7 +160,7 @@ def resample_from_to( # This check requires `shape` attribute of image if not spatial_axes_first(from_img): raise ValueError( - 'Cannot predict position of spatial axes for Image ' 'type ' + str(type(from_img)) + f'Cannot predict position of spatial axes for Image type {type(from_img)}' ) try: to_shape, to_affine = to_vox_map.shape, to_vox_map.affine @@ -177,7 +182,12 @@ def resample_from_to( def resample_to_output( - in_img, voxel_sizes=None, order=3, mode='constant', cval=0.0, out_class=Nifti1Image + in_img, + voxel_sizes=None, + order=3, + mode='constant', + cval=0.0, + out_class=Nifti1Image, ): """Resample image `in_img` to output voxel axes (world space) @@ -235,7 +245,13 @@ def resample_to_output( return resample_from_to(in_img, out_vox_map, order, mode, cval, out_class) -def smooth_image(img, fwhm, mode='nearest', cval=0.0, out_class=Nifti1Image): +def smooth_image( + img, + fwhm, + mode='nearest', + cval=0.0, + out_class=Nifti1Image, +): """Smooth image `img` along voxel axes by FWHM `fwhm` millimeters Parameters @@ -275,9 +291,7 @@ def smooth_image(img, fwhm, mode='nearest', cval=0.0, out_class=Nifti1Image): """ # This check requires `shape` attribute of image if not spatial_axes_first(img): - raise ValueError( - 'Cannot predict position of 
spatial axes for Image ' 'type ' + str(type(img))
-        )
+        raise ValueError(f'Cannot predict position of spatial axes for Image type {type(img)}')
     if out_class is None:
         out_class = img.__class__
     n_dim = len(img.shape)
diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py
index a63894cef8..cb40633e54 100644
--- a/nibabel/rstutils.py
+++ b/nibabel/rstutils.py
@@ -7,7 +7,12 @@
 
 
 def rst_table(
-    cell_values, row_names=None, col_names=None, title='', val_fmt='{0:5.2f}', format_chars=None
+    cell_values,
+    row_names=None,
+    col_names=None,
+    title='',
+    val_fmt='{0:5.2f}',
+    format_chars=None,
 ):
     """Return string for ReST table with entries `cell_values`
 
@@ -87,14 +92,23 @@ def rst_table(
         row_val_fmt = '{0:<' + str(col_len) + '}'
     table_strs = []
     if title != '':
-        table_strs += [title_heading * len(title), title, title_heading * len(title), '']
+        table_strs += [
+            title_heading * len(title),
+            title,
+            title_heading * len(title),
+            '',
+        ]
     along_headings = [along * len(h) for h in col_headings]
     crossed_line = cross_starter + cross_joiner.join(along_headings) + cross_ender
     thick_long_headings = [thick_long * len(h) for h in col_headings]
     crossed_thick_line = (
         cross_thick_starter + cross_thick_joiner.join(thick_long_headings) + cross_thick_ender
     )
-    table_strs += [crossed_line, down_starter + col_header + down_ender, crossed_thick_line]
+    table_strs += [
+        crossed_line,
+        down_starter + col_header + down_ender,
+        crossed_thick_line,
+    ]
     for row_no, row_name in enumerate(row_names):
         row_vals = [row_val_fmt.format(row_str) for row_str in row_str_list[row_no]]
         row_line = down_starter + down_joiner.join([row_name] + row_vals) + down_ender
diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py
index 1adf63fe42..c582ee149b 100644
--- a/nibabel/spatialimages.py
+++ b/nibabel/spatialimages.py
@@ -311,8 +311,7 @@ def __init__(self, img):
 
         if not spatial_axes_first(img):
             raise ValueError(
-                'Cannot predict position of spatial axes for '
-                'Image type ' + img.__class__.__name__
+                f'Cannot predict position of spatial axes for image type {img.__class__.__name__}'
             )
         self.img = img
 
@@ -356,7 +355,7 @@ def check_slicing(self, slicer, return_spatial=False):
                 raise IndexError('New axis not permitted in spatial dimensions')
             elif isinstance(subslicer, int):
                 raise IndexError(
-                    'Scalar indices disallowed in spatial dimensions; ' 'Use `[x]` or `x:x+1`.'
+                    'Scalar indices disallowed in spatial dimensions; Use `[x]` or `x:x+1`.'
) return spatial_slices if return_spatial else slicer @@ -495,16 +494,14 @@ def _affine2header(self): def __str__(self): shape = self.shape affine = self.affine - return '\n'.join( - ( - str(self.__class__), - f'data shape {shape}', - 'affine: ', - str(affine), - 'metadata:', - str(self._header), - ) - ) + return f""" +{self.__class__} +data shape {shape} +affine: +{affine} +metadata: +{self._header} +""" def get_data_dtype(self): return self._header.get_data_dtype() diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index cad77c4d09..7a2f176318 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -88,7 +88,7 @@ def set_slope_inter(self, slope, inter=None): self._structarr['scl_slope'] = slope if inter in (None, 0) or np.isnan(inter): return - raise HeaderTypeError('Cannot set non-zero intercept ' 'for SPM headers') + raise HeaderTypeError('Cannot set non-zero intercept for SPM headers') class Spm99AnalyzeHeader(SpmAnalyzeHeader): @@ -282,7 +282,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): if 'mat' in mats: # this overrides a 'M', and includes any flip mat = mats['mat'] if mat.ndim > 2: - warnings.warn('More than one affine in "mat" matrix, ' 'using first') + warnings.warn('More than one affine in "mat" matrix, using first') mat = mat[:, :, 0] ret._affine = mat elif 'M' in mats: # the 'M' matrix does not include flips diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 604c32b1e5..f99f80e4e4 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -11,7 +11,10 @@ from .trk import TrkFile # List of all supported formats -FORMATS = {'.trk': TrkFile, '.tck': TckFile} +FORMATS = { + '.trk': TrkFile, + '.tck': TckFile, +} def is_supported(fileobj): diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index f9e9af90e3..faa5d2390d 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -527,7 +527,7 @@ def _op(self, op, value=None, inplace=False): def __iter__(self): if len(self._lengths) != len(self._offsets): raise ValueError( - 'ArraySequence object corrupted:' ' len(self._lengths) != len(self._offsets)' + 'ArraySequence object corrupted: len(self._lengths) != len(self._offsets)' ) for offset, lengths in zip(self._offsets, self._lengths): diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index e08afb48ea..7738a0e069 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -360,14 +360,14 @@ def _read_header(cls, fileobj): # Check integrity of TCK header. if 'datatype' not in hdr: - msg = "Missing 'datatype' attribute in TCK header." ' Assuming it is Float32LE.' + msg = "Missing 'datatype' attribute in TCK header. Assuming it is Float32LE." warnings.warn(msg, HeaderWarning) hdr['datatype'] = 'Float32LE' if not hdr['datatype'].startswith('Float32'): msg = ( - "TCK only supports float32 dtype but 'datatype: " - f"{hdr['datatype']}' was specified in the header." + f"TCK only supports float32 dtype but 'datatype: {hdr['datatype']}' " + 'was specified in the header.' 
) raise HeaderError(msg) diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index bbf156ee08..b32e12d8b3 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -508,15 +508,7 @@ def save(self, fileobj): points = np.asarray(t.streamline) scalars = [np.asarray(t.data_for_points[k]) for k in data_for_points_keys] - scalars = np.concatenate( - [ - np.ndarray( - (len(points), 0), - ) - ] - + scalars, - axis=1, - ) + scalars = np.concatenate([np.ndarray((len(points), 0))] + scalars, axis=1) properties = [ np.asarray(t.data_for_streamline[k]) for k in data_for_streamline_keys ] @@ -543,7 +535,7 @@ def save(self, fileobj): raise DataError(msg) if nb_properties_per_streamline != int(nb_properties_per_streamline): - msg = 'Nb. of properties differs from one streamline to' ' another!' + msg = 'Nb. of properties differs from one streamline to another!' raise DataError(msg) header[Field.NB_STREAMLINES] = nb_streamlines @@ -599,14 +591,14 @@ def _read_header(fileobj): header_rec[Field.VOXEL_TO_RASMM] = np.zeros((4, 4)) elif header_rec['version'] == 3: warnings.warn( - 'Parsing a TRK v3 file as v2. Some features may not ' 'be handled correctly.', + 'Parsing a TRK v3 file as v2. Some features may not be handled correctly.', HeaderWarning, ) elif header_rec['version'] in (2, 3): pass # Nothing more to do. else: raise HeaderError( - 'NiBabel only supports versions 1 and 2 of ' 'the Trackvis file format' + 'NiBabel only supports versions 1 and 2 of the Trackvis file format' ) # Convert the first record of `header_rec` into a dictionary @@ -617,11 +609,8 @@ def _read_header(fileobj): if header[Field.VOXEL_TO_RASMM][3][3] == 0: header[Field.VOXEL_TO_RASMM] = np.eye(4, dtype=np.float32) warnings.warn( - ( - "Field 'vox_to_ras' in the TRK's header was" - " not recorded. Will continue assuming it's" - ' the identity.' - ), + "Field 'vox_to_ras' in the TRK's header was not recorded. 
" + "Will continue assuming it's the identity.", HeaderWarning, ) diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 08ae5f4bda..28f405e566 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -109,10 +109,23 @@ def test_append_diag(): # Routine for appending diagonal elements assert_array_equal(append_diag(np.diag([2, 3, 1]), [1]), np.diag([2, 3, 1, 1])) assert_array_equal(append_diag(np.diag([2, 3, 1]), [1, 1]), np.diag([2, 3, 1, 1, 1])) - aff = np.array([[2, 0, 0], [0, 3, 0], [0, 0, 1], [0, 0, 1]]) + aff = np.array( + [ + [2, 0, 0], + [0, 3, 0], + [0, 0, 1], + [0, 0, 1], + ] + ) assert_array_equal( append_diag(aff, [5], [9]), - [[2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 1], [0, 0, 5, 9], [0, 0, 0, 1]], + [ + [2, 0, 0, 0], + [0, 3, 0, 0], + [0, 0, 0, 1], + [0, 0, 5, 9], + [0, 0, 0, 1], + ], ) assert_array_equal( append_diag(aff, [5, 6], [9, 10]), @@ -125,10 +138,21 @@ def test_append_diag(): [0, 0, 0, 0, 1], ], ) - aff = np.array([[2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 1]]) + aff = np.array( + [ + [2, 0, 0, 0], + [0, 3, 0, 0], + [0, 0, 0, 1], + ] + ) assert_array_equal( append_diag(aff, [5], [9]), - [[2, 0, 0, 0, 0], [0, 3, 0, 0, 0], [0, 0, 0, 5, 9], [0, 0, 0, 0, 1]], + [ + [2, 0, 0, 0, 0], + [0, 3, 0, 0, 0], + [0, 0, 0, 5, 9], + [0, 0, 0, 0, 1], + ], ) # Length of starts has to match length of steps with pytest.raises(AffineError): @@ -150,15 +174,8 @@ def test_dot_reduce(): assert_array_equal(dot_reduce(vec, mat), np.dot(vec, mat)) assert_array_equal(dot_reduce(mat, vec), np.dot(mat, vec)) mat2 = np.arange(13, 22).reshape((3, 3)) - assert_array_equal(dot_reduce(mat2, vec, mat), np.dot(mat2, np.dot(vec, mat))) - assert_array_equal( - dot_reduce( - mat, - vec, - mat2, - ), - np.dot(mat, np.dot(vec, mat2)), - ) + assert_array_equal(dot_reduce(mat2, vec, mat), mat2 @ (vec @ mat)) + assert_array_equal(dot_reduce(mat, vec, mat2), mat @ (vec @ mat2)) def test_voxel_sizes(): diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 1f80addc30..5287bad4a9 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -238,7 +238,7 @@ def test_logger_error(self): imageglobals.logger = logger hdr.copy().check_fix() assert str_io.getvalue() == ( - 'bitpix does not match datatype; ' 'setting bitpix to match datatype\n' + 'bitpix does not match datatype; setting bitpix to match datatype\n' ) # Check that error_level in fact causes error to be raised imageglobals.error_level = 10 diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index b2c1f1257c..5bf6e79cb9 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -24,7 +24,12 @@ shape=(33, 41, 25, 3), dtype=np.int16, affine=np.array( - [[-3.0, 0, 0, 49.5], [0, -3.0, 0, 82.312], [0, 0, 3.0, -52.3511], [0, 0, 0, 1.0]] + [ + [-3.0, 0, 0, 49.5], + [0, -3.0, 0, 82.312], + [0, 0, 3.0, -52.3511], + [0, 0, 0, 1.0], + ] ), zooms=(3.0, 3.0, 3.0, 3.0), data_summary=dict(min=0, max=13722, mean=4266.76024636), @@ -39,7 +44,12 @@ shape=(47, 54, 43, 1.0), dtype=np.int16, affine=np.array( - [[3.0, 0, 0, -66.0], [0, 3.0, 0, -87.0], [0, 0, 3.0, -54.0], [0, 0, 0, 1.0]] + [ + [3.0, 0, 0, -66.0], + [0, 3.0, 0, -87.0], + [0, 0, 3.0, -54.0], + [0, 0, 0, 1.0], + ] ), zooms=(3.0, 3.0, 3.0, 0.0), data_summary=dict( diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index 8b0fb932d5..b0c965c399 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -32,19 +32,37 @@ def x_only(x): cosx = 
np.cos(x) sinx = np.sin(x) - return np.array([[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) + return np.array( + [ + [1, 0, 0], + [0, cosx, -sinx], + [0, sinx, cosx], + ] + ) def y_only(y): cosy = np.cos(y) siny = np.sin(y) - return np.array([[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]]) + return np.array( + [ + [cosy, 0, siny], + [0, 1, 0], + [-siny, 0, cosy], + ] + ) def z_only(z): cosz = np.cos(z) sinz = np.sin(z) - return np.array([[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]]) + return np.array( + [ + [cosz, -sinz, 0], + [sinz, cosz, 0], + [0, 0, 1], + ] + ) def sympy_euler(z, y, x): diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index 781f17d716..9f42e67c0d 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -120,14 +120,7 @@ def test_canonical_slicers(): canonical_slicers((1, 10), shape, True) # Unless check_inds is False assert canonical_slicers((10,), shape, False) == (10, slice(None)) - assert canonical_slicers( - ( - 1, - 10, - ), - shape, - False, - ) == (1, 10) + assert canonical_slicers((1, 10), shape, False) == (1, 10) # Check negative -> positive assert canonical_slicers(-1, shape) == (9, slice(None)) assert canonical_slicers((slice(None), -1), shape) == (slice(None), 9) @@ -487,15 +480,10 @@ def test_optimize_read_slicers(): (slice(None), slice(None)), ) # optimizing - assert optimize_read_slicers( - ( - slice(None), - slice(0, 5, 2), - ), - (10, 6), - 4, - _always, - ) == ((slice(None), slice(0, 5, 1)), (slice(None), slice(None, None, 2))) + assert optimize_read_slicers((slice(None), slice(0, 5, 2)), (10, 6), 4, _always) == ( + (slice(None), slice(0, 5, 1)), + (slice(None), slice(None, None, 2)), + ) # Optimize does nothing for integer when last assert optimize_read_slicers((slice(None), 1), (10, 6), 4, _always) == ( (slice(None), 1), @@ -623,14 +611,7 @@ def test_predict_shape(): def test_strided_scalar(): # Utility to make numpy array of given shape from scalar using striding for shape, scalar in product( - ( - (2,), - ( - 2, - 3, - ), - (2, 3, 4), - ), + ((2,), (2, 3), (2, 3, 4)), (1, 2, np.int16(3)), ): expected = np.zeros(shape, dtype=np.array(scalar).dtype) + scalar diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 321eb1b961..a06c180b84 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -46,17 +46,14 @@ def test_type_info(): for dtt in np.sctypes['int'] + np.sctypes['uint']: info = np.iinfo(dtt) infod = type_info(dtt) - assert ( - dict( - min=info.min, - max=info.max, - nexp=None, - nmant=None, - minexp=None, - maxexp=None, - width=np.dtype(dtt).itemsize, - ) - == infod + assert infod == dict( + min=info.min, + max=info.max, + nexp=None, + nmant=None, + minexp=None, + maxexp=None, + width=np.dtype(dtt).itemsize, ) assert infod['min'].dtype.type == dtt assert infod['max'].dtype.type == dtt @@ -74,11 +71,11 @@ def test_type_info(): # https://developer.apple.com/library/mac/#documentation/Darwin/Reference/Manpages/man3/float.3.html if vals in ( (52, 11, 8), # longdouble is same as double - (63, 15, 12), + (63, 15, 12), # intel 80 bit (63, 15, 16), # intel 80 bit (112, 15, 16), # real float128 - (106, 11, 16), - ): # PPC head, tail doubles, expected values + (106, 11, 16), # PPC head, tail doubles, expected values + ): pass elif vals == (105, 11, 16): # bust info for PPC head / tail longdoubles # min and max broken, copy from infod diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index 
f8186f4147..9fd48ee697 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -104,7 +104,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, msg): # Reuse the sniff... but it will only change for some # sniff_mode values. - msg = f'{expected_img_klass.__name__}/ {sniff_mode}/ ' f'{expect_success}' + msg = f'{expected_img_klass.__name__}/ {sniff_mode}/ {expect_success}' sniff = check_img( img_path, klass, diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 3eeefaa84b..be4f0deb07 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -40,7 +40,14 @@ fname=pjoin(data_path, 'tiny.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(2.0, 2.0, 2.0), # These values from SPM2 data_summary=dict(min=0.20784314, max=0.74901961, mean=0.60602819), @@ -50,7 +57,14 @@ fname=pjoin(data_path, 'minc1_1_scale.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(2.0, 2.0, 2.0), # These values from mincstats data_summary=dict(min=0.2082842439, max=0.2094327615, mean=0.2091292083), @@ -60,7 +74,14 @@ fname=pjoin(data_path, 'minc1_4d.mnc'), shape=(2, 10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(1.0, 2.0, 2.0, 2.0), # These values from mincstats data_summary=dict(min=0.2078431373, max=1.498039216, mean=0.9090422837), @@ -70,7 +91,14 @@ fname=pjoin(data_path, 'minc1-no-att.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 1.0, 0], [0, 1.0, 0, 0], [1.0, 0, 0, 0], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 1.0, 0], + [0, 1.0, 0, 0], + [1.0, 0, 0, 0], + [0, 0, 0, 1], + ] + ), zooms=(1.0, 1.0, 1.0), # These values from SPM2/mincstats data_summary=dict(min=0.20784314, max=0.74901961, mean=0.6061103), diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index bd06456c33..251393818a 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -26,7 +26,14 @@ fname=pjoin(data_path, 'small.mnc'), shape=(18, 28, 29), dtype=np.int16, - affine=np.array([[0, 0, 7.0, -98], [0, 8.0, 0, -134], [9.0, 0, 0, -72], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 7.0, -98], + [0, 8.0, 0, -134], + [9.0, 0, 0, -72], + [0, 0, 0, 1], + ] + ), zooms=(9.0, 8.0, 7.0), # These values from mincstats data_summary=dict(min=0.1185331417, max=92.87690699, mean=31.2127952), @@ -36,7 +43,14 @@ fname=pjoin(data_path, 'minc2_1_scale.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(2.0, 2.0, 2.0), # These values from mincstats data_summary=dict(min=0.2082842439, max=0.2094327615, mean=0.2091292083), @@ -46,7 +60,14 @@ fname=pjoin(data_path, 'minc2_4d.mnc'), shape=(2, 10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 
2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(1.0, 2.0, 2.0, 2.0), # These values from mincstats data_summary=dict(min=0.2078431373, max=1.498039216, mean=0.9090422837), @@ -56,7 +77,14 @@ fname=pjoin(data_path, 'minc2-no-att.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 1.0, 0], [0, 1.0, 0, 0], [1.0, 0, 0, 0], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 1.0, 0], + [0, 1.0, 0, 0], + [1.0, 0, 0, 0], + [0, 0, 0, 1], + ] + ), zooms=(1.0, 1.0, 1.0), # These values from SPM2/mincstats data_summary=dict(min=0.20784314, max=0.74901961, mean=0.6061103), diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 5219cb27ac..f993e342e4 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -328,9 +328,7 @@ def test_iter(): blue ridged mountains of virginia -""".split( - '\n' - ) +""".splitlines() with InTemporaryDirectory(): sobj = BytesIO() files_to_test = [ diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 16f7f5ce46..8821fac0e0 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -30,21 +30,61 @@ IN_ARRS = [ np.eye(4), - [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]], - [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]], - [[3, 1, 0, 0], [1, 3, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], - [[1, 3, 0, 0], [3, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + [ + [0, 0, 1, 0], + [0, 1, 0, 0], + [1, 0, 0, 0], + [0, 0, 0, 1], + ], + [ + [0, 1, 0, 0], + [0, 0, 1, 0], + [1, 0, 0, 0], + [0, 0, 0, 1], + ], + [ + [3, 1, 0, 0], + [1, 3, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], + [ + [1, 3, 0, 0], + [3, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], ] OUT_ORNTS = [ - [[0, 1], [1, 1], [2, 1]], - [[2, 1], [1, 1], [0, 1]], - [[2, 1], [0, 1], [1, 1]], - [[0, 1], [1, 1], [2, 1]], - [[1, 1], [0, 1], [2, 1]], + [ + [0, 1], + [1, 1], + [2, 1], + ], + [ + [2, 1], + [1, 1], + [0, 1], + ], + [ + [2, 1], + [0, 1], + [1, 1], + ], + [ + [0, 1], + [1, 1], + [2, 1], + ], + [ + [1, 1], + [0, 1], + [2, 1], + ], ] -IN_ARRS = IN_ARRS + [ +IN_ARRS.extend( [ [np.cos(np.pi / 6 + i * np.pi / 2), np.sin(np.pi / 6 + i * np.pi / 2), 0, 0], [-np.sin(np.pi / 6 + i * np.pi / 2), np.cos(np.pi / 6 + i * np.pi / 2), 0, 0], @@ -52,13 +92,29 @@ [0, 0, 0, 1], ] for i in range(4) -] +) -OUT_ORNTS = OUT_ORNTS + [ - [[0, 1], [1, 1], [2, 1]], - [[1, -1], [0, 1], [2, 1]], - [[0, -1], [1, -1], [2, 1]], - [[1, 1], [0, -1], [2, 1]], +OUT_ORNTS += [ + [ + [0, 1], + [1, 1], + [2, 1], + ], + [ + [1, -1], + [0, 1], + [2, 1], + ], + [ + [0, -1], + [1, -1], + [2, 1], + ], + [ + [1, 1], + [0, -1], + [2, 1], + ], ] @@ -159,12 +215,39 @@ def test_io_orientation(): ornt = io_orientation(arr) assert_array_equal( ornt, - [[0, 1], [1, 1], [2, 1], [3, 1], [np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], + [ + [0, 1], + [1, 1], + [2, 1], + [3, 1], + [np.nan, np.nan], + [np.nan, np.nan], + [np.nan, np.nan], + ], ) # Test behavior of thresholding - def_aff = np.array([[1.0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) - fail_tol = np.array([[0, 1], [np.nan, np.nan], [2, 1]]) - pass_tol = np.array([[0, 1], [1, 1], [2, 1]]) + def_aff = np.array( + [ + [1.0, 1, 0, 0], + [0, 0, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + ) + fail_tol = np.array( + [ + [0, 1], + [np.nan, np.nan], + [2, 1], + ] + ) + pass_tol = np.array( + [ + [0, 1], + [1, 1], + [2, 1], + ] + ) eps = np.finfo(float).eps # Test that a Y axis appears as we increase the difference between the # first two columns @@ 
-190,22 +273,40 @@ def test_io_orientation(): aff_extra_col[:3, -1] = vec assert_array_equal( io_orientation(aff_extra_col, tol=1e-5), - [[0, 1], [np.nan, np.nan], [2, 1], [np.nan, np.nan]], + [ + [0, 1], + [np.nan, np.nan], + [2, 1], + [np.nan, np.nan], + ], ) aff_extra_row = np.zeros((5, 4)) aff_extra_row[-1, -1] = 1 # Not strictly necessary, but for completeness aff_extra_row[:3, :3] = mat aff_extra_row[:3, -1] = vec - assert_array_equal(io_orientation(aff_extra_row, tol=1e-5), [[0, 1], [np.nan, np.nan], [2, 1]]) + assert_array_equal( + io_orientation(aff_extra_row, tol=1e-5), + [ + [0, 1], + [np.nan, np.nan], + [2, 1], + ], + ) def test_ornt_transform(): assert_array_equal( - ornt_transform([[0, 1], [1, 1], [2, -1]], [[1, 1], [0, 1], [2, 1]]), + ornt_transform( + [[0, 1], [1, 1], [2, -1]], + [[1, 1], [0, 1], [2, 1]], + ), [[1, 1], [0, 1], [2, -1]], ) assert_array_equal( - ornt_transform([[0, 1], [1, 1], [2, 1]], [[2, 1], [0, -1], [1, 1]]), + ornt_transform( + [[0, 1], [1, 1], [2, 1]], + [[2, 1], [0, -1], [1, 1]], + ), [[1, -1], [2, 1], [0, 1]], ) # Must have same shape @@ -214,11 +315,17 @@ def test_ornt_transform(): # Must be (N,2) in shape with pytest.raises(ValueError): - ornt_transform([[0, 1, 1], [1, 1, 1]], [[0, 1, 1], [1, 1, 1]]) + ornt_transform( + [[0, 1, 1], [1, 1, 1]], + [[0, 1, 1], [1, 1, 1]], + ) # Target axes must exist in source with pytest.raises(ValueError): - ornt_transform([[0, 1], [1, 1], [1, 1]], [[0, 1], [1, 1], [2, 1]]) + ornt_transform( + [[0, 1], [1, 1], [1, 1]], + [[0, 1], [1, 1], [2, 1]], + ) def test_ornt2axcodes(): diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index e50b609da4..f1d81cf96c 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -310,35 +310,9 @@ def test_get_sorted_slice_indices(): hdr = PARRECHeader(HDR_INFO, HDR_DEFS[::-1]) assert_array_equal( hdr.get_sorted_slice_indices(), - [ - 8, - 7, - 6, - 5, - 4, - 3, - 2, - 1, - 0, - 17, - 16, - 15, - 14, - 13, - 12, - 11, - 10, - 9, - 26, - 25, - 24, - 23, - 22, - 21, - 20, - 19, - 18, - ], + [8, 7, 6, 5, 4, 3, 2, 1, 0] + + [17, 16, 15, 14, 13, 12, 11, 10, 9] + + [26, 25, 24, 23, 22, 21, 20, 19, 18], ) # Omit last slice, only two volumes with clear_and_catch_warnings(modules=[parrec], record=True): @@ -378,12 +352,12 @@ def test_sorting_multiple_echos_and_contrasts(): # This .PAR file has 3 echos and 4 image types (real, imaginary, magnitude, # phase). # After sorting should be: - # Type 0, Echo 1, Slices 1-30 - # Type 0, Echo 2, Slices 1-30 - # Type 0, Echo 3, Slices 1-30 - # Type 1, Echo 1, Slices 1-30 - # ... - # Type 3, Echo 3, Slices 1-30 + # Type 0, Echo 1, Slices 1-30 + # Type 0, Echo 2, Slices 1-30 + # Type 0, Echo 3, Slices 1-30 + # Type 1, Echo 1, Slices 1-30 + # ... 
+ # Type 3, Echo 3, Slices 1-30 t1_par = pjoin(DATA_PATH, 'T1_3echo_mag_real_imag_phase.PAR') with open(t1_par, 'rt') as fobj: t1_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index dc877d3802..ffd1fbff2b 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -81,15 +81,37 @@ def test_adapt_affine(): # For 4x4 affine, 4D image, add extra identity dimension assert_array_equal( adapt_affine(aff_3d, 4), - [[0, 1, 2, 0, 11], [3, 4, 5, 0, 12], [6, 7, 8, 0, 13], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]], + [ + [0, 1, 2, 0, 11], + [3, 4, 5, 0, 12], + [6, 7, 8, 0, 13], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 1], + ], ) # For 5x5 affine, 4D image, identity aff_4d = from_matvec(np.arange(16).reshape((4, 4)), [11, 12, 13, 14]) assert_array_equal(adapt_affine(aff_4d, 4), aff_4d) # For 4x4 affine, 2D image, dropped column - assert_array_equal(adapt_affine(aff_3d, 2), [[0, 1, 11], [3, 4, 12], [6, 7, 13], [0, 0, 1]]) + assert_array_equal( + adapt_affine(aff_3d, 2), + [ + [0, 1, 11], + [3, 4, 12], + [6, 7, 13], + [0, 0, 1], + ], + ) # For 4x4 affine, 1D image, 2 dropped columns - assert_array_equal(adapt_affine(aff_3d, 1), [[0, 11], [3, 12], [6, 13], [0, 1]]) + assert_array_equal( + adapt_affine(aff_3d, 1), + [ + [0, 11], + [3, 12], + [6, 13], + [0, 1], + ], + ) # For 3x3 affine, 2D image, identity aff_2d = from_matvec(np.arange(4).reshape((2, 2)), [11, 12]) assert_array_equal(adapt_affine(aff_2d, 2), aff_2d) @@ -267,7 +289,12 @@ def test_resample_to_output(caplog): exp_shape = (4, 4, 4) assert out_img.shape == exp_shape exp_aff = np.array( - [[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] + [ + [1, 0, 0, -2 * np.cos(np.pi / 4)], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] ) assert_almost_equal(out_img.affine, exp_aff) rzs, trans = to_matvec(np.dot(npl.inv(rot_3), exp_aff)) diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index dfac167690..1bdd6c26e8 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -70,7 +70,13 @@ def _some_slicers(shape): slicers[i, i] = 0 # Add a newaxis to keep us on our toes no_pos = ndim // 2 - slicers = np.hstack((slicers[:, :no_pos], np.empty((ndim, 1)), slicers[:, no_pos:])) + slicers = np.hstack( + ( + slicers[:, :no_pos], + np.empty((ndim, 1)), + slicers[:, no_pos:], + ) + ) slicers[:, no_pos] = None return [tuple(s) for s in slicers] @@ -236,7 +242,11 @@ def obj_params(self): slopes = (1.0, 2.0, float(np.float32(3.1416))) if self.has_slope else (1.0,) inters = (0.0, 10.0, float(np.float32(2.7183))) if self.has_inter else (0.0,) for shape, dtype, offset, slope, inter in product( - self.shapes, self.data_dtypes, offsets, slopes, inters + self.shapes, + self.data_dtypes, + offsets, + slopes, + inters, ): n_els = np.prod(shape) dtype = np.dtype(dtype).newbyteorder(self.data_endian) diff --git a/nibabel/tests/test_spaces.py b/nibabel/tests/test_spaces.py index 83dec9256c..dbfe533890 100644 --- a/nibabel/tests/test_spaces.py +++ b/nibabel/tests/test_spaces.py @@ -67,7 +67,12 @@ def get_outspace_params(): # x diff, y diff now 3 cos pi / 4 == 2.12, ceil to 3, add 1 # most negative x now 2 cos pi / 4 (4, 4, 4), - [[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + [ + [1, 0, 0, -2 * np.cos(np.pi / 4)], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], ), # Less than 3 axes ((2, 3), np.eye(4), None, (2, 3), np.eye(4)), @@ -120,7 +125,14 @@ def 
test_slice2volume(): assert (slice2volume(val, axis) == exp_aff).all() -@pytest.mark.parametrize('index, axis', [[-1, 0], [0, -1], [0, 3]]) +@pytest.mark.parametrize( + 'index, axis', + [ + [-1, 0], + [0, -1], + [0, 3], + ], +) def test_slice2volume_exception(index, axis): with pytest.raises(ValueError): slice2volume(index, axis) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 2a1da21bdd..27305739aa 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -152,11 +152,23 @@ def test_data_dtype(): def test_affine(): hdr = SpatialHeader(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) assert_array_almost_equal( - hdr.get_best_affine(), [[-3.0, 0, 0, 0], [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]] + hdr.get_best_affine(), + [ + [-3.0, 0, 0, 0], + [0, 2, 0, -1], + [0, 0, 1, -1], + [0, 0, 0, 1], + ], ) hdr.default_x_flip = False assert_array_almost_equal( - hdr.get_best_affine(), [[3.0, 0, 0, 0], [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]] + hdr.get_best_affine(), + [ + [3.0, 0, 0, 0], + [0, 2, 0, -1], + [0, 0, 1, -1], + [0, 0, 0, 1], + ], ) assert np.array_equal(hdr.get_base_affine(), hdr.get_best_affine()) diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index d8821d308b..b01195ff5f 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -1023,17 +1023,32 @@ def test_shape_zoom_affine(): zooms = (3, 2, 1) res = shape_zoom_affine(shape, zooms) exp = np.array( - [[-3.0, 0.0, 0.0, 3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -3.0], [0.0, 0.0, 0.0, 1.0]] + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ] ) assert_array_almost_equal(res, exp) res = shape_zoom_affine((3, 5), (3, 2)) exp = np.array( - [[-3.0, 0.0, 0.0, 3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -0.0], [0.0, 0.0, 0.0, 1.0]] + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -0.0], + [0.0, 0.0, 0.0, 1.0], + ] ) assert_array_almost_equal(res, exp) res = shape_zoom_affine(shape, zooms, False) exp = np.array( - [[3.0, 0.0, 0.0, -3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -3.0], [0.0, 0.0, 0.0, 1.0]] + [ + [3.0, 0.0, 0.0, -3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ] ) assert_array_almost_equal(res, exp) @@ -1139,7 +1154,12 @@ def assert_rt( # check defense against modifying data in-place for in_cast, pre_clips, inter, slope, post_clips, nan_fill in itp( - (None, np.float32), (None, (-1, 25)), (0.0, 1.0), (1.0, 0.5), (None, (-2, 49)), (None, 1) + (None, np.float32), + (None, (-1, 25)), + (0.0, 1.0), + (1.0, 0.5), + (None, (-2, 49)), + (None, 1), ): data = np.arange(24).astype(np.float32) assert_rt( @@ -1185,8 +1205,8 @@ def read(self, n_bytes): except OSError as err: message = str(err) assert message == ( - 'Expected 11390625000000000000 bytes, got 0 ' - 'bytes from object\n - could the file be damaged?' + 'Expected 11390625000000000000 bytes, got 0 bytes from object\n' + ' - could the file be damaged?' 
) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index d1c13dfeee..bb9f612a7d 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -319,7 +319,7 @@ def link_to(self, other): """ if not isinstance(other, self.__class__): raise TypeError( - 'other must be an instance of ' f'{self.__class__.__name__}, not {type(other)}' + f'other must be an instance of {self.__class__.__name__}, not {type(other)}' ) self._link(other, is_primary=True) From 6b0ddd23b1da1df7ca9ae275673f82bfa20a754c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 30 Dec 2022 11:01:19 -0500 Subject: [PATCH 134/702] STY: Run vanilla blue Add fmt off/on guards for tabular comments [git-blame-ignore-rev] --- nibabel/cifti2/cifti2.py | 2 +- nibabel/freesurfer/mghformat.py | 2 ++ nibabel/nicom/csareader.py | 10 +++++----- nibabel/nifti1.py | 4 +++- nibabel/nifti2.py | 2 ++ nibabel/parrec.py | 4 +++- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 6c141b44f1..713907cf66 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -70,7 +70,7 @@ class Cifti2HeaderError(Exception): CIFTI_MODEL_TYPES = ( 'CIFTI_MODEL_TYPE_SURFACE', # Modeled using surface vertices - 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. + 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. ) CIFTI_SERIESUNIT_TYPES = ( diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 1091bedbcb..b65c24f221 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -29,6 +29,7 @@ # See https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat DATA_OFFSET = 284 # Note that mgh data is strictly big endian ( hence the > sign ) +# fmt: off header_dtd = [ ('version', '>i4'), # 0; must be 1 ('dims', '>i4', (4,)), # 4; width, height, depth, nframes @@ -47,6 +48,7 @@ ('ti', '>f4'), # 12; inversion time ('fov', '>f4'), # 16; field of view (unused) ] +# fmt: on header_dtype = np.dtype(header_dtd) footer_dtype = np.dtype(footer_dtd) diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 961e93ecbb..40f3f852d9 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -10,11 +10,11 @@ 'FL': float, # float 'FD': float, # double 'DS': float, # decimal string - 'SS': int, # signed short - 'US': int, # unsigned short - 'SL': int, # signed long - 'UL': int, # unsigned long - 'IS': int, # integer string + 'SS': int, # signed short + 'US': int, # unsigned short + 'SL': int, # signed long + 'UL': int, # unsigned long + 'IS': int, # integer string } MAX_CSA_ITEMS = 1000 diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 392bf5c2ad..a5079d3d89 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -32,6 +32,7 @@ # nifti1 flat header definition for Analyze-like first 348 bytes # first number in comments indicates offset in file header in bytes +# fmt: off header_dtd = [ ('sizeof_hdr', 'i4'), # 0; must be 348 ('data_type', 'S10'), # 4; unused @@ -75,8 +76,9 @@ ('srow_y', 'f4', (4,)), # 296; 2nd row affine transform ('srow_z', 'f4', (4,)), # 312; 3rd row affine transform ('intent_name', 'S16'), # 328; name or meaning of data - ('magic', 'S4'), # 344; must be 'ni1\0' or 'n+1\0' + ('magic', 'S4'), # 344; must be 'ni1\0' or 'n+1\0' ] +# fmt: on # Full header numpy dtype header_dtype = np.dtype(header_dtd) diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 01a918e445..c0106ae29d 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -74,6 +74,7 @@ # nifti2 flat header definition 
for first 540 bytes # First number in comments indicates offset in file header in bytes +# fmt: off header_dtd = [ ('sizeof_hdr', 'i4'), # 0; must be 540 ('magic', 'S4'), # 4; must be 'ni2\0' or 'n+2\0' @@ -114,6 +115,7 @@ ('dim_info', 'u1'), # 524; MRI slice ordering code ('unused_str', 'S15'), # 525; unused, filled with \0 ] # total 540 +# fmt: on # Full header numpy dtype header_dtype = np.dtype(header_dtd) diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 81e956f2b8..1459f3460e 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -144,7 +144,7 @@ [ [0, 0, -1, 0], # L -> R [-1, 0, 0, 0], # P -> A - [0, 1, 0, 0], # S -> S + [0, 1, 0, 0], # S -> S [0, 0, 0, 1], ] ) @@ -269,6 +269,7 @@ ] # Extra image def fields for 4.1 compared to 4 +# fmt: off image_def_dtds['V4.1'] = image_def_dtds['V4'] + [ ('diffusion b value number', int), # (imagekey!) ('gradient orientation number', int), # (imagekey!) @@ -281,6 +282,7 @@ image_def_dtds['V4.2'] = image_def_dtds['V4.1'] + [ ('label type', int), # (imagekey!) ] +# fmt: on #: PAR header versions we claim to understand supported_versions = list(image_def_dtds.keys()) From 4481a4c2640bd4be6e9c468e550d01aae448ab99 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 22:32:46 -0500 Subject: [PATCH 135/702] STY: pyupgrade --py37-plus Mostly motivated by excessive use of arguments to super(). Also caught a number of `np.array(X).astype(Y)` to convert to `np.array(X, Y)`. [git-blame-ignore-rev] --- nibabel/analyze.py | 6 +- nibabel/arraywriters.py | 18 +- nibabel/brikhead.py | 12 +- nibabel/cifti2/cifti2.py | 2 +- nibabel/cifti2/parse_cifti2.py | 6 +- nibabel/cmdline/diff.py | 2 +- nibabel/cmdline/roi.py | 2 +- nibabel/cmdline/tests/test_utils.py | 162 ++++++++---------- nibabel/dataobj_images.py | 2 +- nibabel/deprecated.py | 2 +- nibabel/ecat.py | 4 +- nibabel/freesurfer/io.py | 10 +- nibabel/freesurfer/mghformat.py | 10 +- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/parse_gifti_fast.py | 4 +- nibabel/gifti/tests/test_parse_gifti_fast.py | 2 +- nibabel/nicom/ascconv.py | 2 +- nibabel/nicom/dicomwrappers.py | 12 +- nibabel/nicom/tests/test_ascconv.py | 2 +- nibabel/nifti1.py | 20 +-- nibabel/nifti2.py | 4 +- nibabel/parrec.py | 4 +- nibabel/spatialimages.py | 2 +- nibabel/spm99analyze.py | 10 +- nibabel/streamlines/tck.py | 4 +- .../streamlines/tests/test_array_sequence.py | 4 +- nibabel/streamlines/tractogram.py | 6 +- nibabel/streamlines/tractogram_file.py | 2 +- nibabel/streamlines/trk.py | 10 +- nibabel/testing/__init__.py | 8 +- nibabel/tests/test_analyze.py | 6 +- nibabel/tests/test_arrayproxy.py | 2 +- nibabel/tests/test_casting.py | 4 +- nibabel/tests/test_data.py | 18 +- nibabel/tests/test_filebasedimages.py | 2 +- nibabel/tests/test_fileslice.py | 3 +- nibabel/tests/test_funcs.py | 4 +- nibabel/tests/test_image_api.py | 2 +- nibabel/tests/test_nifti1.py | 14 +- nibabel/tests/test_openers.py | 6 +- nibabel/tests/test_parrec.py | 38 ++-- nibabel/tests/test_recoder.py | 8 +- nibabel/tests/test_scripts.py | 4 +- nibabel/tests/test_spm99analyze.py | 2 +- nibabel/tests/test_volumeutils.py | 8 +- nibabel/tests/test_wrapstruct.py | 2 +- nibabel/tmpdirs.py | 4 +- nibabel/viewers.py | 2 +- nibabel/volumeutils.py | 8 +- nibabel/xmlutils.py | 2 +- 50 files changed, 218 insertions(+), 257 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index e128239865..e165112259 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -248,7 +248,7 @@ def __init__(self, binaryblock=None, endianness=None, check=True): >>> 
hdr4.endianness == swapped_code True """ - super(AnalyzeHeader, self).__init__(binaryblock, endianness, check) + super().__init__(binaryblock, endianness, check) @classmethod def guessed_endian(klass, hdr): @@ -336,7 +336,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): """Return header data for empty header with given endianness""" - hdr_data = super(AnalyzeHeader, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) hdr_data['sizeof_hdr'] = klass.sizeof_hdr hdr_data['dim'] = 1 hdr_data['dim'][0] = 0 @@ -904,7 +904,7 @@ class AnalyzeImage(SpatialImage): ImageArrayProxy = ArrayProxy def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None): - super(AnalyzeImage, self).__init__(dataobj, affine, header, extra, file_map) + super().__init__(dataobj, affine, header, extra, file_map) # Reset consumable values self._header.set_data_offset(0) self._header.set_slope_inter(None, None) diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 21fd6ba6ee..5a0b04925e 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -313,7 +313,7 @@ def scaling_needed(self): data are within range of the output type, return False * Otherwise return True """ - if not super(SlopeArrayWriter, self).scaling_needed(): + if not super().scaling_needed(): return False mn, mx = self.finite_range() # this is cached # No finite data - no scaling needed @@ -428,7 +428,7 @@ def _range_scale(self, in_min, in_max): # not lose precision because min/max are of fp type. out_min, out_max = np.array((out_min, out_max), dtype=big_float) else: # (u)int - out_min, out_max = [int_to_float(v, big_float) for v in (out_min, out_max)] + out_min, out_max = (int_to_float(v, big_float) for v in (out_min, out_max)) if self._out_dtype.kind == 'u': if in_min < 0 and in_max > 0: raise WriterError( @@ -507,13 +507,11 @@ def __init__(self, array, out_dtype=None, calc_scale=True, scaler_dtype=np.float >>> (aw.slope, aw.inter) == (1.0, 128) True """ - super(SlopeInterArrayWriter, self).__init__( - array, out_dtype, calc_scale, scaler_dtype, **kwargs - ) + super().__init__(array, out_dtype, calc_scale, scaler_dtype, **kwargs) def reset(self): """Set object to values before any scaling calculation""" - super(SlopeInterArrayWriter, self).reset() + super().reset() self.inter = 0.0 def _get_inter(self): @@ -549,14 +547,14 @@ def to_fileobj(self, fileobj, order='F'): def _iu2iu(self): # (u)int to (u)int - mn, mx = [as_int(v) for v in self.finite_range()] + mn, mx = (as_int(v) for v in self.finite_range()) # range may be greater than the largest integer for this type. # as_int needed to work round numpy 1.4.1 int casting bug out_dtype = self._out_dtype # Options in this method are scaling using intercept only. These will # have to pass through ``self.scaler_dtype`` (because the intercept is # in this type). - o_min, o_max = [as_int(v) for v in shared_range(self.scaler_dtype, out_dtype)] + o_min, o_max = (as_int(v) for v in shared_range(self.scaler_dtype, out_dtype)) type_range = o_max - o_min mn2mx = mx - mn if mn2mx <= type_range: # might offset be enough? 
@@ -579,7 +577,7 @@ def _iu2iu(self): self.inter = inter return # Try slope options (sign flip) and then range scaling - super(SlopeInterArrayWriter, self)._iu2iu() + super()._iu2iu() def _range_scale(self, in_min, in_max): """Calculate scaling, intercept based on data range and output type""" @@ -604,7 +602,7 @@ def _range_scale(self, in_min, in_max): in_min, in_max = as_int(in_min), as_int(in_max) in_range = int_to_float(in_max - in_min, big_float) # Cast to float for later processing. - in_min, in_max = [int_to_float(v, big_float) for v in (in_min, in_max)] + in_min, in_max = (int_to_float(v, big_float) for v in (in_min, in_max)) if out_dtype.kind == 'f': # Type range, these are also floats info = type_info(out_dtype) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 72b09c4d75..470ed16664 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -195,7 +195,7 @@ def parse_AFNI_header(fobj): """ # edge case for being fed a filename instead of a file object if isinstance(fobj, str): - with open(fobj, 'rt') as src: + with open(fobj) as src: return parse_AFNI_header(src) # unpack variables in HEAD file head = fobj.read().split('\n\n') @@ -239,9 +239,7 @@ def __init__(self, file_like, header, *, mmap=True, keep_file_open=None): effect. The default value (``None``) will result in the value of ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. """ - super(AFNIArrayProxy, self).__init__( - file_like, header, mmap=mmap, keep_file_open=keep_file_open - ) + super().__init__(file_like, header, mmap=mmap, keep_file_open=keep_file_open) self._scaling = header.get_data_scaling() @property @@ -293,9 +291,7 @@ def __init__(self, info): """ self.info = info dt = _get_datatype(self.info) - super(AFNIHeader, self).__init__( - data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms() - ) + super().__init__(data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms()) @classmethod def from_header(klass, header=None): @@ -553,7 +549,7 @@ def filespec_to_file_map(klass, filespec): If `filespec` is not recognizable as being a filename for this image type. 
""" - file_map = super(AFNIImage, klass).filespec_to_file_map(filespec) + file_map = super().filespec_to_file_map(filespec) # check for AFNI-specific BRIK/HEAD compression idiosyncrasies for key, fholder in file_map.items(): fname = fholder.filename diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 713907cf66..423dbfbf9d 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -1451,7 +1451,7 @@ def __init__( """ if not isinstance(header, Cifti2Header) and header: header = Cifti2Header.from_axes(header) - super(Cifti2Image, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) + super().__init__(dataobj, header=header, extra=extra, file_map=file_map) self._nifti_header = LimitedNifti2Header.from_header(nifti_header) # if NIfTI header not specified, get data type from input array diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index e067144997..c7bfb953f9 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -91,7 +91,7 @@ def _valid_intent_code(klass, intent_code): @classmethod def may_contain_header(klass, binaryblock): - if not super(_Cifti2AsNiftiHeader, klass).may_contain_header(binaryblock): + if not super().may_contain_header(binaryblock): return False hdr = klass(binaryblock=binaryblock[: klass.sizeof_hdr]) return klass._valid_intent_code(hdr.get_intent('code')[0]) @@ -135,9 +135,7 @@ class Cifti2Parser(xml.XmlParser): """Class to parse an XML string into a CIFTI-2 header object""" def __init__(self, encoding=None, buffer_size=3500000, verbose=0): - super(Cifti2Parser, self).__init__( - encoding=encoding, buffer_size=buffer_size, verbose=verbose - ) + super().__init__(encoding=encoding, buffer_size=buffer_size, verbose=verbose) self.fsm_state = [] self.struct_state = [] diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 799e17f645..b409c7205d 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -248,7 +248,7 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): # Since we operated on sub-selected values already, we need # to plug them back in candidates[ - tuple((indexes[sub_thr] for indexes in np.where(candidates))) + tuple(indexes[sub_thr] for indexes in np.where(candidates)) ] = False max_rel_diff = np.max(rel_diff) else: diff --git a/nibabel/cmdline/roi.py b/nibabel/cmdline/roi.py index 36f00a033a..ea47970043 100644 --- a/nibabel/cmdline/roi.py +++ b/nibabel/cmdline/roi.py @@ -22,7 +22,7 @@ def lossless_slice(img, slicers): def parse_slice(crop, allow_step=True): if crop is None: return slice(None) - start, stop, *extra = [int(val) if val else None for val in crop.split(':')] + start, stop, *extra = (int(val) if val else None for val in crop.split(':')) if len(extra) > 1: raise ValueError(f'Cannot parse specification: {crop}') if not allow_step and extra and extra[0] not in (1, None): diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 6d2e6953fb..8143d648d9 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -58,16 +58,16 @@ def test_get_headers_diff(): fnames = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) expected_difference = { - 'regular': [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))], - 'dim_info': [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + 'regular': [np.asarray(b''), 
np.asarray(b'r')], + 'dim_info': [np.asarray(0, 'uint8'), np.asarray(57, 'uint8')], 'dim': [ - np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + np.array([3, 4, 5, 7, 1, 1, 1, 1], 'int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1], 'int16'), ], - 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], - 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'datatype': [np.array(2, 'uint8'), np.array(4, 'uint8')], + 'bitpix': [np.array(8, 'uint8'), np.array(16, 'uint8')], 'pixdim': [ - np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0], 'float32'), np.array( [ -1.00000000e00, @@ -78,64 +78,57 @@ def test_get_headers_diff(): 1.00000000e00, 1.00000000e00, 1.00000000e00, - ] - ).astype(dtype='float32'), + ], + 'float32', + ), ], - 'slice_end': [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')], - 'xyzt_units': [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + 'slice_end': [np.array(0, 'uint8'), np.array(23, 'uint8')], + 'xyzt_units': [np.array(0, 'uint8'), np.array(10, 'uint8')], 'cal_max': [ - np.array(0.0).astype(dtype='float32'), - np.asarray(1162.0).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.asarray(1162.0, 'float32'), ], 'descrip': [ - np.array(''.encode('utf-8')).astype(dtype='S80'), - np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( - dtype='S80' - ), + np.array(b'', 'S80'), + np.array(b'FSL3.3\x00 v2.25 NIfTI-1 Single file format', 'S80'), ], - 'qform_code': [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')], - 'sform_code': [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'qform_code': [np.array(0, 'int16'), np.array(1, 'int16')], + 'sform_code': [np.array(2, 'int16'), np.array(1, 'int16')], 'quatern_b': [ - np.array(0.0).astype(dtype='float32'), - np.array(-1.9451068140294884e-26).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-1.9451068140294884e-26, 'float32'), ], 'quatern_c': [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.9967085123062134).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-0.9967085123062134, 'float32'), ], 'quatern_d': [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.0810687392950058).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-0.0810687392950058, 'float32'), ], 'qoffset_x': [ - np.array(0.0).astype(dtype='float32'), - np.array(117.8551025390625).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(117.8551025390625, 'float32'), ], 'qoffset_y': [ - np.array(0.0).astype(dtype='float32'), - np.array(-35.72294235229492).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-35.72294235229492, 'float32'), ], 'qoffset_z': [ - np.array(0.0).astype(dtype='float32'), - np.array(-7.248798370361328).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-7.248798370361328, 'float32'), ], 'srow_x': [ - np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), - np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02]).astype( - dtype='float32' - ), + np.array([1.0, 0.0, 0.0, 0.0], 'float32'), + np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02], 'float32'), ], 'srow_y': [ - np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), - np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, 
-3.57229424e01]).astype( - dtype='float32' - ), + np.array([0.0, 3.0, 0.0, 0.0], 'float32'), + np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01], 'float32'), ], 'srow_z': [ - np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), - np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00]).astype( - dtype='float32' - ), + np.array([0.0, 0.0, 2.0, 0.0], 'float32'), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00], 'float32'), ], } @@ -146,8 +139,8 @@ def test_display_diff(): bogus_names = ['hellokitty.nii.gz', 'privettovarish.nii.gz'] dict_values = { - 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], - 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'datatype': [np.array(2, 'uint8'), np.array(4, 'uint8')], + 'bitpix': [np.array(8, 'uint8'), np.array(16, 'uint8')], } expected_output = """\ @@ -220,16 +213,16 @@ def test_get_data_diff(): def test_main(): test_names = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] expected_difference = { - 'regular': [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))], - 'dim_info': [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + 'regular': [np.asarray(b''), np.asarray(b'r')], + 'dim_info': [np.asarray(0, 'uint8'), np.asarray(57, 'uint8')], 'dim': [ - np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + np.array([3, 4, 5, 7, 1, 1, 1, 1], 'int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1], 'int16'), ], - 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], - 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'datatype': [np.array(2, 'uint8'), np.array(4, 'uint8')], + 'bitpix': [np.array(8, 'uint8'), np.array(16, 'uint8')], 'pixdim': [ - np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0], 'float32'), np.array( [ -1.00000000e00, @@ -240,64 +233,57 @@ def test_main(): 1.00000000e00, 1.00000000e00, 1.00000000e00, - ] - ).astype(dtype='float32'), + ], + 'float32', + ), ], - 'slice_end': [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')], - 'xyzt_units': [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + 'slice_end': [np.array(0, 'uint8'), np.array(23, 'uint8')], + 'xyzt_units': [np.array(0, 'uint8'), np.array(10, 'uint8')], 'cal_max': [ - np.array(0.0).astype(dtype='float32'), - np.asarray(1162.0).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.asarray(1162.0, 'float32'), ], 'descrip': [ - np.array(''.encode('utf-8')).astype(dtype='S80'), - np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( - dtype='S80' - ), + np.array(b'', 'S80'), + np.array(b'FSL3.3\x00 v2.25 NIfTI-1 Single file format', 'S80'), ], - 'qform_code': [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')], - 'sform_code': [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'qform_code': [np.array(0, 'int16'), np.array(1, 'int16')], + 'sform_code': [np.array(2, 'int16'), np.array(1, 'int16')], 'quatern_b': [ - np.array(0.0).astype(dtype='float32'), - np.array(-1.9451068140294884e-26).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-1.9451068140294884e-26, 'float32'), ], 'quatern_c': [ - np.array(0.0).astype(dtype='float32'), - 
np.array(-0.9967085123062134).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-0.9967085123062134, 'float32'), ], 'quatern_d': [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.0810687392950058).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-0.0810687392950058, 'float32'), ], 'qoffset_x': [ - np.array(0.0).astype(dtype='float32'), - np.array(117.8551025390625).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(117.8551025390625, 'float32'), ], 'qoffset_y': [ - np.array(0.0).astype(dtype='float32'), - np.array(-35.72294235229492).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-35.72294235229492, 'float32'), ], 'qoffset_z': [ - np.array(0.0).astype(dtype='float32'), - np.array(-7.248798370361328).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-7.248798370361328, 'float32'), ], 'srow_x': [ - np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), - np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02]).astype( - dtype='float32' - ), + np.array([1.0, 0.0, 0.0, 0.0], 'float32'), + np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02], 'float32'), ], 'srow_y': [ - np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), - np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01]).astype( - dtype='float32' - ), + np.array([0.0, 3.0, 0.0, 0.0], 'float32'), + np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01], 'float32'), ], 'srow_z': [ - np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), - np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00]).astype( - dtype='float32' - ), + np.array([0.0, 0.0, 2.0, 0.0], 'float32'), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00], 'float32'), ], 'DATA(md5)': ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'], } diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 64ef906820..5c8de66674 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -38,7 +38,7 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): file_map : mapping, optional mapping giving file information for this image format """ - super(DataobjImage, self).__init__(header=header, extra=extra, file_map=file_map) + super().__init__(header=header, extra=extra, file_map=file_map) self._dataobj = dataobj self._fdata_cache = None self._data_cache = None diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 900c0fcf4d..aa41675dbd 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -60,7 +60,7 @@ class FutureWarningMixin: def __init__(self, *args, **kwargs): warnings.warn(self.warn_message, FutureWarning, stacklevel=2) - super(FutureWarningMixin, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class VisibleDeprecationWarning(UserWarning): diff --git a/nibabel/ecat.py b/nibabel/ecat.py index d151465933..5217bd1333 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -278,7 +278,7 @@ def __init__(self, binaryblock=None, endianness=None, check=True): Whether to check and fix header for errors. No checks currently implemented, so value has no effect. 
""" - super(EcatHeader, self).__init__(binaryblock, endianness, check) + super().__init__(binaryblock, endianness, check) @classmethod def guessed_endian(klass, hdr): @@ -291,7 +291,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): """Return header data for empty header with given endianness""" - hdr_data = super(EcatHeader, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) hdr_data['magic_number'] = 'MATRIX72' hdr_data['sw_version'] = 74 hdr_data['num_frames'] = 0 diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index b6f003b984..6e8538c202 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -229,7 +229,7 @@ def write_geometry(filepath, coords, faces, create_stamp=None, volume_info=None) with open(filepath, 'wb') as fobj: magic_bytes.tofile(fobj) - fobj.write((f'{create_stamp}\n\n').encode('utf-8')) + fobj.write((f'{create_stamp}\n\n').encode()) np.array([coords.shape[0], faces.shape[0]], dtype='>i4').tofile(fobj) @@ -610,13 +610,11 @@ def _serialize_volume_info(volume_info): strings.append(np.array(volume_info[key], dtype='>i4').tobytes()) elif key in ('valid', 'filename'): val = volume_info[key] - strings.append(f'{key} = {val}\n'.encode('utf-8')) + strings.append(f'{key} = {val}\n'.encode()) elif key == 'volume': val = volume_info[key] - strings.append(f'{key} = {val[0]} {val[1]} {val[2]}\n'.encode('utf-8')) + strings.append(f'{key} = {val[0]} {val[1]} {val[2]}\n'.encode()) else: val = volume_info[key] - strings.append( - f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode('utf-8') - ) + strings.append(f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode()) return b''.join(strings) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index b65c24f221..cb86b4400b 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -119,7 +119,7 @@ def __init__(self, binaryblock=None, check=True): # Footer is optional and may contain variable-length text fields, # so limit to fixed fields binaryblock = binaryblock[:full_size] + b'\x00' * (full_size - len(binaryblock)) - super(MGHHeader, self).__init__(binaryblock=binaryblock, endianness='big', check=False) + super().__init__(binaryblock=binaryblock, endianness='big', check=False) if not self._structarr['goodRASFlag']: self._set_affine_default() if check: @@ -367,7 +367,7 @@ def default_structarr(klass, endianness=None): """ if endianness is not None and endian_codes[endianness] != '>': raise ValueError('MGHHeader must always be big endian') - structarr = super(MGHHeader, klass).default_structarr(endianness=endianness) + structarr = super().default_structarr(endianness=endianness) structarr['version'] = 1 structarr['dims'] = 1 structarr['type'] = 3 @@ -477,9 +477,7 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): shape = dataobj.shape if len(shape) < 3: dataobj = reshape_dataobj(dataobj, shape + (1,) * (3 - len(shape))) - super(MGHImage, self).__init__( - dataobj, affine, header=header, extra=extra, file_map=file_map - ) + super().__init__(dataobj, affine, header=header, extra=extra, file_map=file_map) @classmethod def filespec_to_file_map(klass, filespec): @@ -487,7 +485,7 @@ def filespec_to_file_map(klass, filespec): """ Check for compressed .mgz format, then .mgh format """ if splitext(filespec)[1].lower() == '.mgz': return dict(image=FileHolder(filename=filespec)) - return super(MGHImage, 
klass).filespec_to_file_map(filespec) + return super().filespec_to_file_map(filespec) @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index dc205d8004..c80fbf2e22 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -589,7 +589,7 @@ def __init__( darrays=None, version='1.0', ): - super(GiftiImage, self).__init__(header=header, extra=extra, file_map=file_map) + super().__init__(header=header, extra=extra, file_map=file_map) if darrays is None: darrays = [] if meta is None: diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 68dfb00af8..e4a9be4bd6 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -150,9 +150,7 @@ def _str2int(in_str): class GiftiImageParser(XmlParser): def __init__(self, encoding=None, buffer_size=35000000, verbose=0, mmap=True): - super(GiftiImageParser, self).__init__( - encoding=encoding, buffer_size=buffer_size, verbose=verbose - ) + super().__init__(encoding=encoding, buffer_size=buffer_size, verbose=verbose) # output self.img = None diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index c7a958a5f8..f08bdd1b17 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -368,7 +368,7 @@ def test_parse_dataarrays(): with InTemporaryDirectory(): save(img, fn) - with open(fn, 'r') as fp: + with open(fn) as fp: txt = fp.read() # Make a bad gifti. txt = txt.replace('NumberOfDataArrays="0"', 'NumberOfDataArrays ="1"') diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index d03845f900..be6da9786c 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -205,7 +205,7 @@ def parse_ascconv(ascconv_str, str_delim='"'): A line of the ASCCONV section could not be parsed. """ attrs, content = ASCCONV_RE.match(ascconv_str).groups() - attrs = OrderedDict((tuple(x.split('=')) for x in attrs.split())) + attrs = OrderedDict(tuple(x.split('=')) for x in attrs.split()) # Normalize string start / end markers to something Python understands content = content.replace(str_delim, '"""').replace('\\', '\\\\') # Use Python's own parser to parse modified ASCCONV assignments diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 9290d6c376..7e6bea9009 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -533,7 +533,7 @@ def image_shape(self): [frame.FrameContentSequence[0].DimensionIndexValues for frame in self.frames] ) # Check that there is only one multiframe stack index - stack_ids = set(frame.FrameContentSequence[0].StackID for frame in self.frames) + stack_ids = {frame.FrameContentSequence[0].StackID for frame in self.frames} if len(stack_ids) > 1: raise WrapperError( 'File contains more than one StackID. Cannot handle multi-stack files' @@ -645,7 +645,7 @@ def get_data(self): def _scale_data(self, data): pix_trans = getattr(self.frames[0], 'PixelValueTransformationSequence', None) if pix_trans is None: - return super(MultiframeWrapper, self)._scale_data(data) + return super()._scale_data(data) scale = float(pix_trans[0].RescaleSlope) offset = float(pix_trans[0].RescaleIntercept) return self._apply_scale_offset(data, scale, offset) @@ -681,7 +681,7 @@ def __init__(self, dcm_data, csa_header=None): None, we try and read the CSA information from `dcm_data`. If this fails, we fall back to an empty dict. 
""" - super(SiemensWrapper, self).__init__(dcm_data) + super().__init__(dcm_data) if dcm_data is None: dcm_data = {} self.dcm_data = dcm_data @@ -695,7 +695,7 @@ def __init__(self, dcm_data, csa_header=None): def slice_normal(self): # The std_slice_normal comes from the cross product of the directions # in the ImageOrientationPatient - std_slice_normal = super(SiemensWrapper, self).slice_normal + std_slice_normal = super().slice_normal csa_slice_normal = csar.get_slice_normal(self.csa_header) if std_slice_normal is None and csa_slice_normal is None: return None @@ -718,7 +718,7 @@ def slice_normal(self): @one_time def series_signature(self): """Add ICE dims from CSA header to signature""" - signature = super(SiemensWrapper, self).series_signature + signature = super().series_signature ice = csar.get_ice_dims(self.csa_header) if ice is not None: ice = ice[:6] + ice[8:9] @@ -861,7 +861,7 @@ def image_position(self): img_pos : (3,) array position in mm of voxel (0,0,0) in Mosaic array """ - ipp = super(MosaicWrapper, self).image_position + ipp = super().image_position # mosaic image size md_rows, md_cols = (self.get('Rows'), self.get('Columns')) iop = self.image_orient_patient diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index 4737d3615d..cd27bc3192 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -15,7 +15,7 @@ def test_ascconv_parse(): - with open(ASCCONV_INPUT, 'rt') as fobj: + with open(ASCCONV_INPUT) as fobj: contents = fobj.read() ascconv_dict, attrs = ascconv.parse_ascconv(contents, str_delim='""') assert attrs == OrderedDict() diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index a5079d3d89..a10686145b 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -688,7 +688,7 @@ class Nifti1Header(SpmAnalyzeHeader): def __init__(self, binaryblock=None, endianness=None, check=True, extensions=()): """Initialize header from binary data block and extensions""" - super(Nifti1Header, self).__init__(binaryblock, endianness, check) + super().__init__(binaryblock, endianness, check) self.extensions = self.exts_klass(extensions) def copy(self): @@ -730,7 +730,7 @@ def write_to(self, fileobj): raise HeaderDataError( f'vox offset set to {vox_offset}, but need at least {min_vox_offset}' ) - super(Nifti1Header, self).write_to(fileobj) + super().write_to(fileobj) # Write extensions if len(self.extensions) == 0: # If single file, write required 0 stream to signal no extensions @@ -754,7 +754,7 @@ def get_best_affine(self): @classmethod def default_structarr(klass, endianness=None): """Create empty header binary block with given endianness""" - hdr_data = super(Nifti1Header, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) if klass.is_single: hdr_data['magic'] = klass.single_magic else: @@ -781,7 +781,7 @@ def from_header(klass, header=None, check=True): hdr : header instance fresh header instance of our own class """ - new_hdr = super(Nifti1Header, klass).from_header(header, check) + new_hdr = super().from_header(header, check) if isinstance(header, Nifti1Header): new_hdr.extensions[:] = header.extensions[:] return new_hdr @@ -811,7 +811,7 @@ def get_data_shape(self): Allows for freesurfer hack for 7th order icosahedron surface described in `issue 309`_, load_nifti.m_, and `save_nifti.m `_. 
""" - shape = super(Nifti1Header, self).get_data_shape() + shape = super().get_data_shape() # Apply freesurfer hack for large vectors if shape[:3] == (-1, 1, 1): vec_len = int(self._structarr['glmin']) @@ -903,7 +903,7 @@ def set_data_shape(self, shape): stacklevel=2, ) shape = (-1, 1, 1) + shape[3:] - super(Nifti1Header, self).set_data_shape(shape) + super().set_data_shape(shape) def set_data_dtype(self, datatype): """Set numpy dtype for data from code or dtype or type @@ -1838,7 +1838,7 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtyp f'by passing the dtype argument to {self.__class__.__name__}().' ) warnings.warn(msg, FutureWarning, stacklevel=2) - super(Nifti1Pair, self).__init__(dataobj, affine, header, extra, file_map, dtype) + super().__init__(dataobj, affine, header, extra, file_map, dtype) # Force set of s/q form when header is None unless affine is also None if header is None and affine is not None: self._affine2header() @@ -1877,7 +1877,7 @@ def update_header(self): >>> np.all(hdr.get_sform() == affine) True """ - super(Nifti1Pair, self).update_header() + super().update_header() hdr = self._header hdr['magic'] = hdr.pair_magic @@ -2232,7 +2232,7 @@ def as_reoriented(self, ornt): the transpose that needs to be done to the implied array, as in ``arr.transpose(ornt[:,0])`` """ - img = super(Nifti1Pair, self).as_reoriented(ornt) + img = super().as_reoriented(ornt) if img is self: return img @@ -2266,7 +2266,7 @@ def _get_fileholders(file_map): def update_header(self): """Harmonize header with image data and affine""" - super(Nifti1Image, self).update_header() + super().update_header() hdr = self._header hdr['magic'] = hdr.single_magic diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index c0106ae29d..cb138962cc 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -188,7 +188,7 @@ def set_data_shape(self, shape): @classmethod def default_structarr(klass, endianness=None): """Create empty header binary block with given endianness""" - hdr_data = super(Nifti2Header, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) hdr_data['eol_check'] = (13, 10, 26, 10) return hdr_data @@ -197,7 +197,7 @@ def default_structarr(klass, endianness=None): @classmethod def _get_checks(klass): # Add our own checks - return super(Nifti2Header, klass)._get_checks() + (klass._chk_eol_check,) + return super()._get_checks() + (klass._chk_eol_check,) @staticmethod def _chk_eol_check(hdr, fix=False): diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 1459f3460e..27ade56ae9 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -754,9 +754,7 @@ def __init__(self, info, image_defs, permit_truncated=False, strict_sort=False): ) # REC data always little endian dt = np.dtype('uint' + str(bitpix)).newbyteorder('<') - super(PARRECHeader, self).__init__( - data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms() - ) + super().__init__(data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms()) @classmethod def from_header(klass, header=None): diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index c582ee149b..d969e57745 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -428,7 +428,7 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): file_map : mapping, optional mapping giving file information for this image format """ - super(SpatialImage, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) + super().__init__(dataobj, 
header=header, extra=extra, file_map=file_map) if affine is not None: # Check that affine is array-like 4,4. Maybe this is too strict at # this abstract level, but so far I think all image formats we know diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 7a2f176318..12e3cb658d 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -45,7 +45,7 @@ class SpmAnalyzeHeader(analyze.AnalyzeHeader): @classmethod def default_structarr(klass, endianness=None): """Create empty header binary block with given endianness""" - hdr_data = super(SpmAnalyzeHeader, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) hdr_data['scl_slope'] = 1 return hdr_data @@ -206,7 +206,7 @@ def set_origin_from_affine(self, affine): @classmethod def _get_checks(klass): - checks = super(Spm99AnalyzeHeader, klass)._get_checks() + checks = super()._get_checks() return checks + (klass._chk_origin,) @staticmethod @@ -264,9 +264,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): img : Spm99AnalyzeImage instance """ - ret = super(Spm99AnalyzeImage, klass).from_file_map( - file_map, mmap=mmap, keep_file_open=keep_file_open - ) + ret = super().from_file_map(file_map, mmap=mmap, keep_file_open=keep_file_open) try: matf = file_map['mat'].get_prepare_fileobj() except OSError: @@ -312,7 +310,7 @@ def to_file_map(self, file_map=None, dtype=None): """ if file_map is None: file_map = self.file_map - super(Spm99AnalyzeImage, self).to_file_map(file_map, dtype=dtype) + super().to_file_map(file_map, dtype=dtype) mat = self._affine if mat is None: return diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 7738a0e069..ec8e7dbce7 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -68,7 +68,7 @@ def __init__(self, tractogram, header=None): This is in contrast with TRK's internal convention where it would have referred to a corner. """ - super(TckFile, self).__init__(tractogram, header) + super().__init__(tractogram, header) @classmethod def is_correct_format(cls, fileobj): @@ -288,7 +288,7 @@ def _write_header(fileobj, header): # Write header to file. fileobj.write(out) - fileobj.write(f'\nfile: . {hdr_offset}\nEND\n'.encode('utf-8')) + fileobj.write(f'\nfile: . {hdr_offset}\nEND\n'.encode()) @classmethod def _read_header(cls, fileobj): diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index a3faa6a58b..0c8557fe50 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -89,7 +89,7 @@ def test_creating_arraysequence_from_list(self): check_arr_seq(ArraySequence(iter(SEQ_DATA['data']), buffer_size), SEQ_DATA['data']) def test_creating_arraysequence_from_generator(self): - gen_1, gen_2 = itertools.tee((e for e in SEQ_DATA['data'])) + gen_1, gen_2 = itertools.tee(e for e in SEQ_DATA['data']) seq = ArraySequence(gen_1) seq_with_buffer = ArraySequence(gen_2, buffer_size=256) @@ -189,7 +189,7 @@ def test_arraysequence_extend(self): # Extend with a generator. seq = SEQ_DATA['seq'].copy() # Copy because of in-place modification. - seq.extend((d for d in new_data)) + seq.extend(d for d in new_data) check_arr_seq(seq, SEQ_DATA['data'] + new_data) # Extend with another `ArraySequence` object. 
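The dominant change across the files above swaps the Python 2 compatible two-argument form ``super(Class, self)`` for Python 3's zero-argument ``super()``. A minimal sketch of the equivalence, using toy classes rather than nibabel's own:

class Base:
    def __init__(self, value):
        self.value = value


class OldStyle(Base):
    def __init__(self, value):
        # Explicit form: names the class and the instance by hand.
        super(OldStyle, self).__init__(value)


class NewStyle(Base):
    def __init__(self, value):
        # Zero-argument form: the compiler supplies __class__ and self.
        super().__init__(value)


assert OldStyle(3).value == NewStyle(3).value == 3

The zero-argument form also keeps working if the class is later renamed, which is one reason style cleanups like this prefer it.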
diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index ded937ab11..9e7c0f9af2 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -98,7 +98,7 @@ class PerArrayDict(SliceableDataDict): def __init__(self, n_rows=0, *args, **kwargs): self.n_rows = n_rows - super(PerArrayDict, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def __setitem__(self, key, value): value = np.asarray(list(value)) @@ -604,9 +604,7 @@ def __init__( refers to the center of the voxel. By default, the streamlines are in an unknown space, i.e. affine_to_rasmm is None. """ - super(LazyTractogram, self).__init__( - streamlines, data_per_streamline, data_per_point, affine_to_rasmm - ) + super().__init__(streamlines, data_per_streamline, data_per_point, affine_to_rasmm) self._nb_streamlines = None self._data = None self._affine_to_apply = np.eye(4) diff --git a/nibabel/streamlines/tractogram_file.py b/nibabel/streamlines/tractogram_file.py index 321ea3d2ad..2cec1ea9cb 100644 --- a/nibabel/streamlines/tractogram_file.py +++ b/nibabel/streamlines/tractogram_file.py @@ -30,7 +30,7 @@ class abstractclassmethod(classmethod): def __init__(self, callable): callable.__isabstractmethod__ = True - super(abstractclassmethod, self).__init__(callable) + super().__init__(callable) class TractogramFile(ABC): diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index b32e12d8b3..4f570a2803 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -237,7 +237,7 @@ def __init__(self, tractogram, header=None): and *mm* space where coordinate (0,0,0) refers to the center of the voxel. """ - super(TrkFile, self).__init__(tractogram, header) + super().__init__(tractogram, header) @classmethod def is_correct_format(cls, fileobj): @@ -359,9 +359,9 @@ def load(cls, fileobj, lazy_load=False): def _read(): for pts, scals, props in cls._read(fileobj, hdr): items = data_per_point_slice.items() - data_for_points = dict((k, scals[:, v]) for k, v in items) + data_for_points = {k: scals[:, v] for k, v in items} items = data_per_streamline_slice.items() - data_for_streamline = dict((k, props[v]) for k, v in items) + data_for_streamline = {k: props[v] for k, v in items} yield TractogramItem(pts, data_for_streamline, data_for_points) tractogram = LazyTractogram.from_data_func(_read) @@ -503,7 +503,7 @@ def save(self, fileobj): header['scalar_name'][:] = scalar_name for t in tractogram: - if any((len(d) != len(t.streamline) for d in t.data_for_points.values())): + if any(len(d) != len(t.streamline) for d in t.data_for_points.values()): raise DataError('Missing scalars for some points!') points = np.asarray(t.streamline) @@ -747,7 +747,7 @@ def __str__(self): vars['property_names'] = '\n '.join(property_names) # Make all byte strings into strings # Fixes recursion error on Python 3.3 - vars = dict((k, asstr(v) if hasattr(v, 'decode') else v) for k, v in vars.items()) + vars = {k: asstr(v) if hasattr(v, 'decode') else v for k, v in vars.items()} return """\ MAGIC NUMBER: {MAGIC_NUMBER} v.{version} diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 4600782d4b..eb99eabca0 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -147,7 +147,7 @@ class clear_and_catch_warnings(warnings.catch_warnings): def __init__(self, record=True, modules=()): self.modules = set(modules).union(self.class_modules) self._warnreg_copies = {} - super(clear_and_catch_warnings, self).__init__(record=record) + 
super().__init__(record=record) def __enter__(self): for mod in self.modules: @@ -155,10 +155,10 @@ def __enter__(self): mod_reg = mod.__warningregistry__ self._warnreg_copies[mod] = mod_reg.copy() mod_reg.clear() - return super(clear_and_catch_warnings, self).__enter__() + return super().__enter__() def __exit__(self, *exc_info): - super(clear_and_catch_warnings, self).__exit__(*exc_info) + super().__exit__(*exc_info) for mod in self.modules: if hasattr(mod, '__warningregistry__'): mod.__warningregistry__.clear() @@ -183,7 +183,7 @@ class error_warnings(clear_and_catch_warnings): filter = 'error' def __enter__(self): - mgr = super(error_warnings, self).__enter__() + mgr = super().__enter__() warnings.simplefilter(self.filter) return mgr diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 5287bad4a9..7584d550f6 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -61,7 +61,7 @@ class TestAnalyzeHeader(tws._TestLabeledWrapStruct): header_class = AnalyzeHeader example_file = header_file sizeof_hdr = AnalyzeHeader.sizeof_hdr - supported_np_types = set((np.uint8, np.int16, np.int32, np.float32, np.float64, np.complex64)) + supported_np_types = {np.uint8, np.int16, np.int32, np.float32, np.float64, np.complex64} add_intp(supported_np_types) def test_supported_types(self): @@ -74,7 +74,7 @@ def get_bad_bb(self): return b'\x00' * self.header_class.template_dtype.itemsize def test_general_init(self): - super(TestAnalyzeHeader, self).test_general_init() + super().test_general_init() hdr = self.header_class() # an empty header has shape (0,) - like an empty array # (np.array([])) @@ -497,7 +497,7 @@ def test_orientation(self): assert_array_equal(hdr.get_base_affine(), aff) def test_str(self): - super(TestAnalyzeHeader, self).test_str() + super().test_str() hdr = self.header_class() s1 = str(hdr) # check the datacode recoding diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 5018e95e1f..7558c55ea5 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -421,7 +421,7 @@ class CountingImageOpener(ImageOpener): num_openers = 0 def __init__(self, *args, **kwargs): - super(CountingImageOpener, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) CountingImageOpener.num_openers += 1 diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index 8c4cad7bbb..62da526319 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -53,7 +53,7 @@ def test_shared_range(): if thresh_overflow: assert np.all((bit_bigger == casted_mx) | (bit_bigger == imax)) else: - assert np.all((bit_bigger <= casted_mx)) + assert np.all(bit_bigger <= casted_mx) if it in np.sctypes['uint']: assert mn == 0 continue @@ -79,7 +79,7 @@ def test_shared_range(): if thresh_overflow: assert np.all((bit_smaller == casted_mn) | (bit_smaller == imin)) else: - assert np.all((bit_smaller >= casted_mn)) + assert np.all(bit_smaller >= casted_mn) def test_shared_range_inputs(): diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index ece2e1c6cd..af7ef66bde 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -51,19 +51,19 @@ def test_versioned(): VersionedDatasource(tmpdir) tmpfile = pjoin(tmpdir, 'config.ini') # ini file, but wrong section - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[SOMESECTION]\n') fobj.write('version = 0.1\n') with pytest.raises(DataError): VersionedDatasource(tmpdir) # 
ini file, but right section, wrong key - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('somekey = 0.1\n') with pytest.raises(DataError): VersionedDatasource(tmpdir) # ini file, right section and key - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1\n') vds = VersionedDatasource(tmpdir) @@ -73,7 +73,7 @@ def test_versioned(): assert vds.minor_version == 1 assert vds.get_filename('config.ini') == tmpfile # ini file, right section and key, funny value - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1.2.dev\n') vds = VersionedDatasource(tmpdir) @@ -142,7 +142,7 @@ def test_data_path(with_nimd_env): # Next, make a fake user directory, and put a file in there with TemporaryDirectory() as tmpdir: tmpfile = pjoin(tmpdir, 'config.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DATA]\n') fobj.write(f'path = {tst_pth}') nibd.get_nipy_user_dir = lambda: tmpdir @@ -153,11 +153,11 @@ def test_data_path(with_nimd_env): with TemporaryDirectory() as tmpdir: nibd.get_nipy_system_dir = lambda: tmpdir tmpfile = pjoin(tmpdir, 'an_example.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DATA]\n') fobj.write(f'path = {tst_pth}\n') tmpfile = pjoin(tmpdir, 'another_example.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DATA]\n') fobj.write('path = %s\n' % '/path/two') assert get_data_path() == tst_list + ['/path/two'] + old_pth @@ -195,7 +195,7 @@ def test_make_datasource(with_nimd_env): with pytest.raises(DataError): make_datasource(pkg_def) tmpfile = pjoin(pkg_dir, 'config.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1\n') ds = make_datasource(pkg_def, data_path=[tmpdir]) @@ -223,7 +223,7 @@ def test_datasource_or_bomber(with_nimd_env): pkg_dir = pjoin(tmpdir, 'pkg') os.mkdir(pkg_dir) tmpfile = pjoin(pkg_dir, 'config.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.2\n') ds = datasource_or_bomber(pkg_def) diff --git a/nibabel/tests/test_filebasedimages.py b/nibabel/tests/test_filebasedimages.py index aa48a3e747..3aa1ae78c5 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -17,7 +17,7 @@ class FBNumpyImage(FileBasedImage): files_types = (('image', '.npy'),) def __init__(self, arr, header=None, extra=None, file_map=None): - super(FBNumpyImage, self).__init__(header, extra, file_map) + super().__init__(header, extra, file_map) self.arr = arr @property diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index 9f42e67c0d..e9f65e45a2 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -726,8 +726,7 @@ def slicer_samples(shape): slicers_list = [] for i in range(ndim): slicers_list.append(_slices_for_len(shape[i])) - for sliceobj in product(*slicers_list): - yield sliceobj + yield from product(*slicers_list) # Nones and ellipses yield (None,) if ndim == 0: diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 752aed0b52..b60974de5f 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -39,8 +39,8 @@ def test_concat(): affine = np.eye(4) for dim in range(2, 6): - all_shapes_ND = 
tuple((shape[:dim] for shape in all_shapes_5D)) - all_shapes_N1D_unary = tuple((shape + (1,) for shape in all_shapes_ND)) + all_shapes_ND = tuple(shape[:dim] for shape in all_shapes_5D) + all_shapes_N1D_unary = tuple(shape + (1,) for shape in all_shapes_ND) all_shapes = all_shapes_ND + all_shapes_N1D_unary # Loop over all possible combinations of images, in first and diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index af82c304ac..39e9b07a83 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -644,7 +644,7 @@ class MakeImageAPI(LoadImageAPI): def obj_params(self): # Return any obj_params from superclass - for func, params in super(MakeImageAPI, self).obj_params(): + for func, params in super().obj_params(): yield func, params # Create new images aff = np.diag([1, 2, 3, 1]) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 59bf214eda..2cbbfc1f5d 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -96,7 +96,7 @@ def test_from_eg_file(self): def test_data_scaling(self): # Test scaling in header - super(TestNifti1PairHeader, self).test_data_scaling() + super().test_data_scaling() hdr = self.header_class() data = np.arange(0, 3, 0.5).reshape((1, 2, 3)) hdr.set_data_shape(data.shape) @@ -1330,9 +1330,7 @@ def test_nifti_dicom_extension(): assert dcmext.get_content().PatientID == 'NiPy' # create a single dicom tag (Patient ID, [0010,0020]) with Explicit VR / LE - dcmbytes_explicit = struct.pack( - '2H2sH4s', 0x10, 0x20, 'LO'.encode('utf-8'), 4, 'NiPy'.encode('utf-8') - ) + dcmbytes_explicit_be = struct.pack('>2H2sH4s', 0x10, 0x20, b'LO', 4, b'NiPy') hdr_be = Nifti1Header(endianness='>') # Big Endian Nifti1Header dcmext = Nifti1DicomExtension(2, dcmbytes_explicit_be, parent_hdr=hdr_be) assert dcmext.__class__ == Nifti1DicomExtension @@ -1552,5 +1548,5 @@ def test_large_nifti1(): data = load('test.nii.gz').get_fdata() # Check that the data are all ones assert image_shape == data.shape - n_ones = np.sum((data == 1.0)) + n_ones = np.sum(data == 1.0) assert np.prod(image_shape) == n_ones diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index f993e342e4..b4f71f2501 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -111,7 +111,7 @@ def test_Opener_various(): class MockIndexedGzipFile(GzipFile): def __init__(self, *args, **kwargs): self._drop_handles = kwargs.pop('drop_handles', False) - super(MockIndexedGzipFile, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) @contextlib.contextmanager @@ -284,7 +284,7 @@ def test_name(): if HAVE_ZSTD: files_to_test += ['test.txt.zst'] for input in files_to_test: - exp_name = input if type(input) == type('') else None + exp_name = input if type(input) == str else None with Opener(input, 'wb') as fobj: assert fobj.name == exp_name @@ -317,7 +317,7 @@ def test_close_if_mine(): if has_closed: assert not fobj.closed fobj.close_if_mine() - is_str = type(input) is type('') + is_str = type(input) is str if has_closed: assert fobj.closed == is_str diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index f1d81cf96c..0a9d7c7dc2 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -210,9 +210,9 @@ def test_top_level_load(): def test_header(): v42_hdr = PARRECHeader(HDR_INFO, HDR_DEFS) for strict_sort in [False, True]: - with open(V4_PAR, 'rt') as fobj: + with open(V4_PAR) as fobj: v4_hdr = PARRECHeader.from_fileobj(fobj, 
strict_sort=strict_sort) - with open(V41_PAR, 'rt') as fobj: + with open(V41_PAR) as fobj: v41_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=strict_sort) for hdr in (v42_hdr, v41_hdr, v4_hdr): hdr = PARRECHeader(HDR_INFO, HDR_DEFS) @@ -296,7 +296,7 @@ def test_affine_regression(): # Data at http://psydata.ovgu.de/philips_achieva_testfiles/conversion2 for basename, exp_affine in PREVIOUS_AFFINES.items(): fname = pjoin(DATA_PATH, basename + '.PAR') - with open(fname, 'rt') as fobj: + with open(fname) as fobj: hdr = PARRECHeader.from_fileobj(fobj) assert_almost_equal(hdr.get_affine(), exp_affine) @@ -328,7 +328,7 @@ def test_sorting_dual_echo_T1(): # For this .PAR file, instead of getting 1 echo per volume, they get # mixed up unless strict_sort=True t1_par = pjoin(DATA_PATH, 'T1_dual_echo.PAR') - with open(t1_par, 'rt') as fobj: + with open(t1_par) as fobj: t1_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order @@ -359,7 +359,7 @@ def test_sorting_multiple_echos_and_contrasts(): # ... # Type 3, Echo 3, Slices 1-30 t1_par = pjoin(DATA_PATH, 'T1_3echo_mag_real_imag_phase.PAR') - with open(t1_par, 'rt') as fobj: + with open(t1_par) as fobj: t1_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order @@ -398,7 +398,7 @@ def test_sorting_multiecho_ASL(): # For this .PAR file has 3 keys corresponding to volumes: # 'echo number', 'label type', 'dynamic scan number' asl_par = pjoin(DATA_PATH, 'ASL_3D_Multiecho.PAR') - with open(asl_par, 'rt') as fobj: + with open(asl_par) as fobj: asl_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order @@ -467,13 +467,13 @@ def test_vol_is_full(): def gen_par_fobj(): for par in glob(pjoin(DATA_PATH, '*.PAR')): - with open(par, 'rt') as fobj: + with open(par) as fobj: yield par, fobj def test_truncated_load(): # Test loading of truncated header - with open(TRUNC_PAR, 'rt') as fobj: + with open(TRUNC_PAR) as fobj: gen_info, slice_info = parse_PAR_header(fobj) with pytest.raises(PARRECError): PARRECHeader(gen_info, slice_info) @@ -504,7 +504,7 @@ def test_vol_calculations(): def test_diffusion_parameters(): # Check getting diffusion parameters from diffusion example dti_par = pjoin(DATA_PATH, 'DTI.PAR') - with open(dti_par, 'rt') as fobj: + with open(dti_par) as fobj: dti_hdr = PARRECHeader.from_fileobj(fobj) assert dti_hdr.get_data_shape() == (80, 80, 10, 8) assert dti_hdr.general_info['diffusion'] == 1 @@ -520,7 +520,7 @@ def test_diffusion_parameters(): def test_diffusion_parameters_strict_sort(): # Check getting diffusion parameters from diffusion example dti_par = pjoin(DATA_PATH, 'DTI.PAR') - with open(dti_par, 'rt') as fobj: + with open(dti_par) as fobj: dti_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order @@ -540,7 +540,7 @@ def test_diffusion_parameters_strict_sort(): def test_diffusion_parameters_v4(): dti_v4_par = pjoin(DATA_PATH, 'DTIv40.PAR') - with open(dti_v4_par, 'rt') as fobj: + with open(dti_v4_par) as fobj: dti_v4_hdr = PARRECHeader.from_fileobj(fobj) assert dti_v4_hdr.get_data_shape() == (80, 80, 10, 8) assert dti_v4_hdr.general_info['diffusion'] == 1 @@ -567,7 +567,7 @@ def test_epi_params(): # Check EPI conversion for par_root in ('T2_-interleaved', 'T2_', 'phantom_EPI_asc_CLEAR_2_1'): epi_par = pjoin(DATA_PATH, par_root + '.PAR') - with open(epi_par, 'rt') 
as fobj: + with open(epi_par) as fobj: epi_hdr = PARRECHeader.from_fileobj(fobj) assert len(epi_hdr.get_data_shape()) == 4 assert_almost_equal(epi_hdr.get_zooms()[-1], 2.0) @@ -577,7 +577,7 @@ def test_xyzt_unit_conversion(): # Check conversion to NIfTI-like has sensible units for par_root in ('T2_-interleaved', 'T2_', 'phantom_EPI_asc_CLEAR_2_1'): epi_par = pjoin(DATA_PATH, par_root + '.PAR') - with open(epi_par, 'rt') as fobj: + with open(epi_par) as fobj: epi_hdr = PARRECHeader.from_fileobj(fobj) nifti_hdr = Nifti1Header.from_header(epi_hdr) assert len(nifti_hdr.get_data_shape()) == 4 @@ -588,7 +588,7 @@ def test_xyzt_unit_conversion(): def test_truncations(): # Test tests for truncation par = pjoin(DATA_PATH, 'T2_.PAR') - with open(par, 'rt') as fobj: + with open(par) as fobj: gen_info, slice_info = parse_PAR_header(fobj) # Header is well-formed as is hdr = PARRECHeader(gen_info, slice_info) @@ -690,10 +690,10 @@ def assert_copy_ok(hdr1, hdr2): assert_copy_ok(hdr, hdr2) assert not hdr.permit_truncated assert not hdr2.permit_truncated - with open(TRUNC_PAR, 'rt') as fobj: + with open(TRUNC_PAR) as fobj: with pytest.raises(PARRECError): PARRECHeader.from_fileobj(fobj) - with open(TRUNC_PAR, 'rt') as fobj: + with open(TRUNC_PAR) as fobj: # Parse but warn on inconsistent header with pytest.warns(UserWarning, match='Header inconsistency'): trunc_hdr = PARRECHeader.from_fileobj(fobj, True) @@ -826,7 +826,7 @@ def test_varying_scaling(): def test_anonymized(): # Test we can read anonymized PAR correctly - with open(ANON_PAR, 'rt') as fobj: + with open(ANON_PAR) as fobj: anon_hdr = PARRECHeader.from_fileobj(fobj) gen_defs, img_defs = anon_hdr.general_info, anon_hdr.image_defs assert gen_defs['patient_name'] == '' @@ -877,7 +877,7 @@ def test_exts2par(): def test_dualTR(): expected_TRs = np.asarray([2000.0, 500.0]) - with open(DUAL_TR_PAR, 'rt') as fobj: + with open(DUAL_TR_PAR) as fobj: with clear_and_catch_warnings(modules=[parrec], record=True) as wlist: simplefilter('always') dualTR_hdr = PARRECHeader.from_fileobj(fobj) @@ -889,7 +889,7 @@ def test_dualTR(): def test_ADC_map(): # test reading an apparent diffusion coefficient map - with open(ADC_PAR, 'rt') as fobj: + with open(ADC_PAR) as fobj: # two truncation warnings expected because general_info indicates: # 1.) 
multiple directions diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index 49a9898ce2..f5a77158ec 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -111,7 +111,7 @@ def values(self): assert rc.code['one'] == 'spam' assert rc.code['first'] == 'spam' assert rc.code['bizarre'] == 'eggs' - assert rc.value_set() == set(['funny', 'list']) + assert rc.value_set() == {'funny', 'list'} assert list(rc.keys()) == ['some', 'keys'] @@ -138,11 +138,11 @@ def test_sugar(): assert rc[1] == rc.field1[1] assert rc['two'] == rc.field1['two'] # keys gets all keys - assert set(rc.keys()) == set((1, 'one', '1', 'first', 2, 'two')) + assert set(rc.keys()) == {1, 'one', '1', 'first', 2, 'two'} # value_set gets set of values from first column - assert rc.value_set() == set((1, 2)) + assert rc.value_set() == {1, 2} # or named column if given - assert rc.value_set('label') == set(('one', 'two')) + assert rc.value_set('label') == {'one', 'two'} # "in" works for values in and outside the set assert 'one' in rc assert 'three' not in rc diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index a089fb7eef..9f07b3933b 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -392,7 +392,7 @@ def test_parrec2nii_with_data(): ['parrec2nii', '--overwrite', '--dwell-time', '--field-strength', '3', dti_par] ) exp_dwell = (26 * 9.087) / (42.576 * 3.4 * 3 * 28) - with open('DTI.dwell_time', 'rt') as fobj: + with open('DTI.dwell_time') as fobj: contents = fobj.read().strip() assert_almost_equal(float(contents), exp_dwell) # ensure trace is removed by default @@ -424,7 +424,7 @@ def test_parrec2nii_with_data(): # Writes .ordering.csv if requested run_command(['parrec2nii', '--overwrite', '--volume-info', dti_par]) assert exists('DTI.ordering.csv') - with open('DTI.ordering.csv', 'r') as csvfile: + with open('DTI.ordering.csv') as csvfile: csvreader = csv.reader(csvfile, delimiter=',') csv_keys = next(csvreader) # header row nlines = 0 # count number of non-header rows diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index e5eb969388..9bc4c928a6 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -83,7 +83,7 @@ class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, HeaderScalingMixin) header_class = Spm99AnalyzeHeader def test_empty(self): - super(TestSpm99AnalyzeHeader, self).test_empty() + super().test_empty() hdr = self.header_class() assert hdr['scl_slope'] == 1 diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index b01195ff5f..ee9329187f 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -996,11 +996,11 @@ def seek(self, *args): def test_fname_ext_ul_case(): # Get filename ignoring the case of the filename extension with InTemporaryDirectory(): - with open('afile.TXT', 'wt') as fobj: + with open('afile.TXT', 'w') as fobj: fobj.write('Interesting information') # OSX usually has case-insensitive file systems; Windows also os_cares_case = not exists('afile.txt') - with open('bfile.txt', 'wt') as fobj: + with open('bfile.txt', 'w') as fobj: fobj.write('More interesting information') # If there is no file, the case doesn't change assert fname_ext_ul_case('nofile.txt') == 'nofile.txt' @@ -1070,7 +1070,7 @@ def test_dtypes(): dt_defs = ((16, 'float32', np.float32),) dtr = make_dt_codes(dt_defs) # check we have the fields we were expecting - assert dtr.value_set() == set((16,)) + 
assert dtr.value_set() == {16} assert dtr.fields == ('code', 'label', 'type', 'dtype', 'sw_dtype') # These of course should pass regardless of dtype assert dtr[np.float32] == 16 @@ -1085,7 +1085,7 @@ def test_dtypes(): dt_defs = ((16, 'float32', np.float32, 'ASTRING'),) dtr = make_dt_codes(dt_defs) assert dtr[np.dtype('f4').newbyteorder('S')] == 16 - assert dtr.value_set() == set((16,)) + assert dtr.value_set() == {16} assert dtr.fields == ('code', 'label', 'type', 'niistring', 'dtype', 'sw_dtype') assert dtr.niistring[16] == 'ASTRING' # And that unequal elements raises error diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 718700768e..66dda18237 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -357,7 +357,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): - structarr = super(MyWrapStruct, klass).default_structarr(endianness) + structarr = super().default_structarr(endianness) structarr['an_integer'] = 1 structarr['a_str'] = b'a string' return structarr diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index e8fba870c1..ac3bf6c0f0 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -74,11 +74,11 @@ class InTemporaryDirectory(TemporaryDirectory): def __enter__(self): self._pwd = os.getcwd() os.chdir(self.name) - return super(InTemporaryDirectory, self).__enter__() + return super().__enter__() def __exit__(self, exc, value, tb): os.chdir(self._pwd) - return super(InTemporaryDirectory, self).__exit__(exc, value, tb) + return super().__exit__(exc, value, tb) class InGivenDirectory: diff --git a/nibabel/viewers.py b/nibabel/viewers.py index bb9f612a7d..9dad3dd17f 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -216,7 +216,7 @@ def __init__(self, data, affine=None, axes=None, title=None): ax.set_ylim(yl) self._volume_ax_objs = dict(step=step, patch=patch) - self._figs = set([a.figure for a in self._axes]) + self._figs = {a.figure for a in self._axes} for fig in self._figs: fig.canvas.mpl_connect('scroll_event', self._on_scroll) fig.canvas.mpl_connect('motion_notify_event', self._on_mouse) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index d31d91ea01..b339b6bab5 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -593,7 +593,7 @@ def array_to_file( if null_scaling and np.can_cast(in_dtype, out_dtype): return _write_data(data, fileobj, out_dtype, order, pre_clips=pre_clips) # Force upcasting for floats by making atleast_1d. 
- slope, inter = [np.atleast_1d(v) for v in (divslope, intercept)] + slope, inter = (np.atleast_1d(v) for v in (divslope, intercept)) # Default working point type for applying slope / inter if slope.dtype.kind in 'iu': slope = slope.astype(float) @@ -621,7 +621,7 @@ def array_to_file( # going to integers # Because we're going to integers, complex inter and slope will only slow # us down, cast to float - slope, inter = [v.astype(_matching_float(v.dtype)) for v in (slope, inter)] + slope, inter = (v.astype(_matching_float(v.dtype)) for v in (slope, inter)) # We'll do the thresholding on the scaled data, so turn off the # thresholding on the unscaled data pre_clips = None @@ -642,7 +642,7 @@ def array_to_file( extremes = np.array(dt_mnmx, dtype=cast_in_dtype) w_type = best_write_scale_ftype(extremes, slope, inter, w_type) # Push up precision by casting the slope, inter - slope, inter = [v.astype(w_type) for v in (slope, inter)] + slope, inter = (v.astype(w_type) for v in (slope, inter)) # We need to know the result of applying slope and inter to the min and # max of the array, in order to clip the output array, after applying # the slope and inter. Otherwise we'd need to clip twice, once before @@ -887,7 +887,7 @@ def apply_read_scaling(arr, slope=None, inter=None): return arr shape = arr.shape # Force float / float upcasting by promoting to arrays - arr, slope, inter = [np.atleast_1d(v) for v in (arr, slope, inter)] + arr, slope, inter = (np.atleast_1d(v) for v in (arr, slope, inter)) if arr.dtype.kind in 'iu': # int to float; get enough precision to avoid infs # Find floating point type for which scaling does not overflow, diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 67e10cd152..8e0b18fb6e 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -94,7 +94,7 @@ def parse(self, string=None, fname=None, fptr=None): if string is not None: fptr = BytesIO(string) elif fname is not None: - fptr = open(fname, 'r') + fptr = open(fname) # store the name of the xml file in case it is needed during parsing self.fname = getattr(fptr, 'name', None) From bf298113da99079c9c7b5e1690e41879828cd472 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 22:53:17 -0500 Subject: [PATCH 136/702] STY: Reduce array().astype() and similar constructs [git-blame-ignore-rev] --- nibabel/freesurfer/io.py | 6 +++--- nibabel/freesurfer/tests/test_mghformat.py | 8 ++++---- nibabel/gifti/tests/test_gifti.py | 2 +- nibabel/tests/test_funcs.py | 2 +- nibabel/tests/test_image_api.py | 2 +- nibabel/tests/test_proxy_api.py | 2 +- nibabel/tests/test_volumeutils.py | 2 +- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 6e8538c202..ec6b474b04 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -70,9 +70,9 @@ def _read_volume_info(fobj): if key in ('valid', 'filename'): volume_info[key] = pair[1].strip() elif key == 'volume': - volume_info[key] = np.array(pair[1].split()).astype(int) + volume_info[key] = np.array(pair[1].split(), int) else: - volume_info[key] = np.array(pair[1].split()).astype(float) + volume_info[key] = np.array(pair[1].split(), float) # Ignore the rest return volume_info @@ -521,7 +521,7 @@ def write_annot(filepath, labels, ctab, names, fill_ctab=True): vnum = len(labels) def write(num, dtype=dt): - np.array([num]).astype(dtype).tofile(fobj) + np.array([num], dtype).tofile(fobj) def write_string(s): s = (s if isinstance(s, bytes) else s.encode()) + b'\x00' diff --git 
a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 0a850488c2..ded1aca8a2 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -123,7 +123,7 @@ def test_write_mgh(): def test_write_noaffine_mgh(): # now just save the image without the vox2ras transform # and see if it uses the default values to save - v = np.ones((7, 13, 3, 22)).astype(np.uint8) + v = np.ones((7, 13, 3, 22), np.uint8) # form a MGHImage object using data # and the default affine matrix (Note the "None") img = MGHImage(v, None) @@ -175,7 +175,7 @@ def bad_dtype_mgh(): """ # try to write an unsigned short and make sure it # raises MGHError - v = np.ones((7, 13, 3, 22)).astype(np.uint16) + v = np.ones((7, 13, 3, 22), np.uint16) # form a MGHImage object using data # and the default affine matrix (Note the "None") MGHImage(v, None) @@ -189,7 +189,7 @@ def test_bad_dtype_mgh(): def test_filename_exts(): # Test acceptable filename extensions - v = np.ones((7, 13, 3, 22)).astype(np.uint8) + v = np.ones((7, 13, 3, 22), np.uint8) # form a MGHImage object using data # and the default affine matrix (Note the "None") img = MGHImage(v, None) @@ -251,7 +251,7 @@ def test_header_updating(): def test_cosine_order(): # Test we are interpreting the cosine order right - data = np.arange(60).reshape((3, 4, 5)).astype(np.int32) + data = np.arange(60, dtype=np.int32).reshape((3, 4, 5)) aff = np.diag([2.0, 3, 4, 1]) aff[0] = [2, 1, 0, 10] img = MGHImage(data, aff) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 8858de589f..49a8cbc07f 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -465,7 +465,7 @@ def test_darray_dtype_coercion_failures(): encodings = ('ASCII', 'B64BIN', 'B64GZ') for data_dtype, darray_dtype, encoding in itertools.product(dtypes, dtypes, encodings): da = GiftiDataArray( - np.arange(10).astype(data_dtype), + np.arange(10, dtype=data_dtype), encoding=encoding, intent='NIFTI_INTENT_NODE_INDEX', datatype=darray_dtype, diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index b60974de5f..10f6e90813 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -127,7 +127,7 @@ def test_concat(): def test_closest_canonical(): # Use 32-bit data so that the AnalyzeImage class doesn't complain - arr = np.arange(24).reshape((2, 3, 4, 1)).astype(np.int32) + arr = np.arange(24, dtype=np.int32).reshape((2, 3, 4, 1)) # Test with an AnalyzeImage first img = AnalyzeImage(arr, np.eye(4)) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 39e9b07a83..091bc57e8c 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -255,7 +255,7 @@ def validate_data_interface(self, imaker, params): with maybe_deprecated(meth_name), pytest.raises(ValueError): method(caching='something') # dataobj is read only - fake_data = np.zeros(img.shape).astype(img.get_data_dtype()) + fake_data = np.zeros(img.shape, dtype=img.get_data_dtype()) with pytest.raises(AttributeError): img.dataobj = fake_data # So is in_memory diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 1bdd6c26e8..1c9e02186c 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -61,7 +61,7 @@ def _some_slicers(shape): ndim = len(shape) - slicers = np.eye(ndim).astype(int).astype(object) + slicers = np.eye(ndim, dtype=int).astype(object) slicers[slicers == 0] = 
slice(None) for i in range(ndim): if i % 2: diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index ee9329187f..ab5bd38ee6 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -1161,7 +1161,7 @@ def assert_rt( (None, (-2, 49)), (None, 1), ): - data = np.arange(24).astype(np.float32) + data = np.arange(24, dtype=np.float32) assert_rt( data, shape, From 69785cd53e7139e90b8cf3cd41ca154e502177f0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 30 Dec 2022 09:07:52 -0500 Subject: [PATCH 137/702] MNT: make .git-blame-ignore-revs --- .git-blame-ignore-revs | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..d700b59665 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,12 @@ +# Thu Dec 29 22:53:17 2022 -0500 - effigies@gmail.com - STY: Reduce array().astype() and similar constructs +bf298113da99079c9c7b5e1690e41879828cd472 +# Thu Dec 29 22:32:46 2022 -0500 - effigies@gmail.com - STY: pyupgrade --py37-plus +4481a4c2640bd4be6e9c468e550d01aae448ab99 +# Fri Dec 30 11:01:19 2022 -0500 - effigies@gmail.com - STY: Run vanilla blue +6b0ddd23b1da1df7ca9ae275673f82bfa20a754c +# Thu Dec 29 21:46:13 2022 -0500 - markiewicz@stanford.edu - STY: Manual, blue-compatible touchups +263fca9bf6d4ca314a5a322b4824d6f53d0589df +# Thu Dec 29 21:32:00 2022 -0500 - effigies@gmail.com - STY: isort +0ab2856cac4d4baae7ab3e2f6d58421db55d807f +# Thu Dec 29 21:30:29 2022 -0500 - effigies@gmail.com - STY: blue +1a8dd302ff85b1136c81d492509b80e7748339f0 \ No newline at end of file From 929227dc0dd4f8e752b568d4fb43b593038d3f5e Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 1 Jan 2023 07:57:12 +0200 Subject: [PATCH 138/702] STY: Apply TOML formatting. --- pyproject.toml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d7a9092e98..8909ff9663 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,22 +49,22 @@ dicom = ["pydicom >=1.0.0"] dicomfs = ["nibabel[dicom]", "pillow"] dev = ["gitpython", "twine"] doc = [ - "matplotlib >= 1.5.3", - "numpydoc", - "sphinx ~= 5.3", - "texext", - "tomli; python_version < \"3.11\"", + "matplotlib >= 1.5.3", + "numpydoc", + "sphinx ~= 5.3", + "texext", + "tomli; python_version < \"3.11\"", ] minc2 = ["h5py"] spm = ["scipy"] style = ["flake8"] test = [ - "coverage", - "pytest !=5.3.4", - "pytest-cov", - "pytest-doctestplus", - "pytest-httpserver", - "pytest-xdist", + "coverage", + "pytest !=5.3.4", + "pytest-cov", + "pytest-doctestplus", + "pytest-httpserver", + "pytest-xdist", ] zstd = ["pyzstd >= 0.14.3"] doctest = ["nibabel[doc,test]"] From 34b096d9973e0a0a2e995a96632e39ea78798054 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 1 Jan 2023 08:03:37 +0200 Subject: [PATCH 139/702] STY: Added style dependencies to dev dependencies and organized installation modifiers. 
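As an aside, and purely as an illustrative check (not part of this
change), the dependencies gated behind each extra can be listed at
runtime with ``importlib.metadata``, assuming a built and installed
nibabel::

    from importlib.metadata import requires

    # Requirement strings carry environment markers such as
    # 'extra == "dev"'; a self-referencing extra like nibabel[style]
    # appears here as an ordinary requirement on nibabel itself,
    # gated on the "dev" extra.
    for req in requires('nibabel'):
        if 'extra == "dev"' in req:
            print(req)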
--- pyproject.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8909ff9663..95c19a9dd0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,9 +45,10 @@ nib-roi = "nibabel.cmdline.roi:main" parrec2nii = "nibabel.cmdline.parrec2nii:main" [project.optional-dependencies] +all = ["nibabel[dicomfs,dev,doc,minc2,spm,style,test,zstd]"] +dev = ["gitpython", "twine", "nibabel[style]"] dicom = ["pydicom >=1.0.0"] dicomfs = ["nibabel[dicom]", "pillow"] -dev = ["gitpython", "twine"] doc = [ "matplotlib >= 1.5.3", "numpydoc", @@ -55,9 +56,10 @@ doc = [ "texext", "tomli; python_version < \"3.11\"", ] +doctest = ["nibabel[doc,test]"] minc2 = ["h5py"] spm = ["scipy"] -style = ["flake8"] +style = ["flake8", "blue", "isort"] test = [ "coverage", "pytest !=5.3.4", @@ -67,8 +69,6 @@ test = [ "pytest-xdist", ] zstd = ["pyzstd >= 0.14.3"] -doctest = ["nibabel[doc,test]"] -all = ["nibabel[dicomfs,dev,doc,minc2,spm,style,test,zstd]"] [tool.setuptools] platforms = ["OS Independent"] From e9c0b2886cd9a4826f1997b13cc0356580ea9c4e Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 1 Jan 2023 08:06:38 +0200 Subject: [PATCH 140/702] STY: Switched single to double quotes to conform with the rest of the file. --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 95c19a9dd0..3e1ba04449 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -83,13 +83,13 @@ nibabel = ["tests/data/*", "*/tests/data/*", "benchmarks/pytest.benchmark.ini"] [tool.blue] line_length = 99 -target-version = ['py37'] -extend-exclude = ''' +target-version = ["py37"] +extend-exclude = """ ( _version.py | nibabel/externals/ ) -''' +""" [tool.isort] profile = "black" From f0770b9653f84012c067bb9aefbefba803a93845 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 13 Dec 2022 04:37:38 +0100 Subject: [PATCH 141/702] MNT: Fix CI warnings ResourceWarning: unclosed file <_io.BufferedReader name='/home/runner/work/nibabel/nibabel/virtenv/lib/python3.9/site-packages/nibabel/tests/data/tinypet.v'> --- nibabel/tests/test_ecat.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 9cb9f91e1a..ff74b7b084 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -59,7 +59,7 @@ def test_dtype(self): hdr.get_data_dtype() def test_header_codes(self): - fid = open(ecat_file, 'rb') + fid = open(self.example_file, 'rb') hdr = self.header_class() newhdr = hdr.from_fileobj(fid) fid.close() @@ -117,6 +117,7 @@ def test_mlist_errors(self): hdr = self.header_class.from_fileobj(fid) hdr['num_frames'] = 6 mlist = read_mlist(fid, hdr.endianness) + fid.close() mlist = np.array( [ [1.68427540e07, 3.00000000e00, 1.20350000e04, 1.00000000e00], From 3026073858ff2b776590f6c85853bf36a6a550e5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 12:00:49 -0500 Subject: [PATCH 142/702] MNT: Add blue and isort as pre-commit hooks --- .pre-commit-config.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..fb0c2b908c --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,9 @@ +repos: + - repo: https://github.com/grantjenks/blue + rev: v0.9.1 + hooks: + - id: blue + - repo: https://github.com/pycqa/isort + rev: 5.11.2 + hooks: + - id: 
isort From ca09a6dd3fd427e06da876fbe25aa786a3969569 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 12:33:49 -0500 Subject: [PATCH 143/702] MNT: Exclude versioneer/externals from under pre-commit --- pyproject.toml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3e1ba04449..f98e03119f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,14 +84,15 @@ nibabel = ["tests/data/*", "*/tests/data/*", "benchmarks/pytest.benchmark.ini"] [tool.blue] line_length = 99 target-version = ["py37"] -extend-exclude = """ +force-exclude = """ ( _version.py | nibabel/externals/ + | versioneer.py ) """ [tool.isort] profile = "black" line_length = 99 -extend_skip = ["_version.py", "externals"] +extend_skip = ["_version.py", "externals", "versioneer.py"] From e87364a18b982fa7cff18a69e8d5fe96465e28cf Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 1 Jan 2023 20:08:19 +0200 Subject: [PATCH 144/702] STY: Added some pre-commit hooks --- .pre-commit-config.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fb0c2b908c..8c884eb2cc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,17 @@ +exclude: '.*/data/.*' repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-json + - id: check-toml + - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict + - id: check-vcs-permalinks - repo: https://github.com/grantjenks/blue rev: v0.9.1 hooks: From 5915db9ed5a398868d1e4a28d75016339bc3294a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 15:52:07 -0500 Subject: [PATCH 145/702] PY3: Upgrade dicomfs.wsgi --- tools/dicomfs.wsgi | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/dicomfs.wsgi b/tools/dicomfs.wsgi index a5f9d30984..f8b1505357 100644 --- a/tools/dicomfs.wsgi +++ b/tools/dicomfs.wsgi @@ -128,7 +128,7 @@ class HandlerError: def application(environ, start_response): try: (status, c_type, output) = handler(environ) - except HandlerError, exc: + except HandlerError as exc: status = exc.status output = exc.output c_type = 'text/plain' @@ -138,7 +138,7 @@ def application(environ, start_response): status = '500 Internal Server Error' output = ''.join(lines) c_type = 'text/plain' - response_headers = [('Content-Type', c_type), + response_headers = [('Content-Type', c_type), ('Content-Length', str(len(output)))] if c_type == 'image/nifti': response_headers.append(('Content-Disposition', 'attachment; filename=image.nii')) @@ -191,12 +191,12 @@ def patient_date_time(patient, date_time): for s in studies_getter(): if s.patient_name_or_uid() != patient: continue - if date_time != '%s_%s' % (s.date, s.time): + if date_time != '{}_{}'.format(s.date, s.time): continue study = s break if study is None: - raise HandlerError, ('404 Not Found', 'study not found') + raise HandlerError('404 Not Found', 'study not found') template = template_env.from_string(patient_date_time_template) return template.render(study=study).encode('utf-8') @@ -205,12 +205,12 @@ def nifti(patient, date_time, scan): for s in studies_getter(): if s.patient_name_or_uid() != patient: continue - if date_time != '%s_%s' % (s.date, s.time): + if date_time != '{}_{}'.format(s.date, s.time): continue study = s break if study is None: - raise HandlerError, ('404 Not Found', 
'study not found') + raise HandlerError('404 Not Found', 'study not found') ser = None for series in s.series: if series.number != scan: @@ -218,7 +218,7 @@ def nifti(patient, date_time, scan): ser = series break if ser is None: - raise HandlerError, ('404 Not Found', 'series not found') + raise HandlerError('404 Not Found', 'series not found') return ser.as_nifti() def png(patient, date_time, scan): @@ -226,12 +226,12 @@ def png(patient, date_time, scan): for s in studies_getter(): if s.patient_name_or_uid() != patient: continue - if date_time != '%s_%s' % (s.date, s.time): + if date_time != '{}_{}'.format(s.date, s.time): continue study = s break if study is None: - raise HandlerError, ('404 Not Found', 'study not found') + raise HandlerError('404 Not Found', 'study not found') ser = None for series in s.series: if series.number != scan: @@ -239,7 +239,7 @@ def png(patient, date_time, scan): ser = series break if ser is None: - raise HandlerError, ('404 Not Found', 'series not found') + raise HandlerError('404 Not Found', 'series not found') index = len(ser.storage_instances) / 2 return ser.as_png(index, True) From c21a78ced53c185ff9ce79fe26aec8756c972c29 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 1 Jan 2023 15:59:45 -0500 Subject: [PATCH 146/702] DOC: Use permalinks in BIAPs --- doc/source/devel/biaps/biap_0004.rst | 4 ++-- doc/source/devel/biaps/biap_0006.rst | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/devel/biaps/biap_0004.rst b/doc/source/devel/biaps/biap_0004.rst index d8ac1569af..e3149ba711 100644 --- a/doc/source/devel/biaps/biap_0004.rst +++ b/doc/source/devel/biaps/biap_0004.rst @@ -123,7 +123,7 @@ data array. At the moment, dcmstack deals with this by wrapping the image with DICOM meta information in `NiftiWrapper` object : see -https://github.com/moloney/dcmstack/blob/master/src/dcmstack/dcmmeta.py#L1185 . +https://github.com/moloney/dcmstack/blob/d157741/src/dcmstack/dcmmeta.py#L1232. This object accepts a Nifti image as input, that usually contains a `DcmMetaExtension`, and has methods `get_meta` (to get metadata from extension), `split` (for taking slice specific metadata into the split parts), `meta_valid` @@ -231,5 +231,5 @@ valid in relation to the current image. .. _dcmstack : https://github.com/moloney/dcmstack -.. _DcmMetaExtension : https://github.com/moloney/dcmstack/blob/master/src/dcmstack/dcmmeta.py#L92 +.. _DcmMetaExtension : https://github.com/moloney/dcmstack/blob/d157741/src/dcmstack/dcmmeta.py#L112 .. vim: ft=rst diff --git a/doc/source/devel/biaps/biap_0006.rst b/doc/source/devel/biaps/biap_0006.rst index ad4a0f9b8d..16a3a4833f 100644 --- a/doc/source/devel/biaps/biap_0006.rst +++ b/doc/source/devel/biaps/biap_0006.rst @@ -194,7 +194,7 @@ In NIfTI: We saw above that the MGH format refers to a volume (in our sense) as a *frame*. ECAT has the same usage - a frame is a 3D volume. The fmristat software uses frame in the same sense, e.g., `line 32 of example.m -`_. +`_. Unfortunately DICOM appears to use "frame" to mean a 2D slice. 
For example, here is the definition of a "multi-frame image":: From aa0db5f4a687d6e5a16ee81fec363d16d4b5495a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 16:06:51 -0500 Subject: [PATCH 147/702] MNT: Ensure newline at end of ignore-revs file --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 093e177c36..e168210b13 100644 --- a/Makefile +++ b/Makefile @@ -81,6 +81,7 @@ $(WWW_DIR): .git-blame-ignore-revs: git log --grep "\[git-blame-ignore-rev\]" --pretty=format:"# %ad - %ae - %s%n%H" \ > .git-blame-ignore-revs + echo >> .git-blame-ignore-revs # # Tests From d14c1cf282a9c3b19189f490f10c35f5739e24d1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 12:38:02 -0500 Subject: [PATCH 148/702] STY: Run pre-commit config on all files [git-blame-ignore-rev] Co-authored-by: Zvi Baratz --- .git-blame-ignore-revs | 2 +- Makefile | 2 +- Makefile.win | 2 +- bin/nib-ls | 1 - bin/nib-nifti-dx | 2 +- bin/parrec2nii | 1 - doc/source/README.txt | 2 +- doc/source/_templates/indexsidebar.html | 1 - doc/source/conf.py | 116 +++++++------- doc/source/devel/biaps/biap_0001.rst | 1 - doc/source/devel/biaps/biap_0003.rst | 4 +- doc/source/devel/biaps/biap_0004.rst | 106 ++++++------- doc/source/devel/register_me.py | 6 +- doc/source/devel/spm_use.rst | 14 +- doc/source/dicom/dcm2nii_algorithms.rst | 2 +- doc/source/dicom/derivations/dicom_mosaic.py | 17 +- .../dicom/derivations/spm_dicom_orient.py | 13 +- doc/source/dicom/dicom_fields.rst | 2 +- doc/source/dicom/dicom_info.rst | 2 +- doc/source/dicom/dicom_mosaic.rst | 10 +- doc/source/dicom/dicom_orientation.rst | 33 ++-- doc/source/dicom/siemens_csa.rst | 18 +-- doc/source/dicom/spm_dicom.rst | 24 +-- doc/source/gitwash/configure_git.rst | 8 +- doc/source/gitwash/development_workflow.rst | 2 +- doc/source/gitwash/forking_hell.rst | 1 - doc/source/gitwash/git_resources.rst | 4 +- doc/source/gitwash/index.rst | 2 - doc/source/gitwash/set_up_fork.rst | 1 - doc/source/installing_data.rst | 3 +- doc/source/notebooks/ata_error.ipynb | 2 +- .../notebooks/cross_product_error.ipynb | 2 +- doc/source/old/design.txt | 22 ++- doc/source/old/format_design.txt | 7 +- doc/source/old/orientation.txt | 1 - doc/source/scripts/make_coord_examples.py | 54 +++---- doc/tools/LICENSE.txt | 1 - doc/tools/apigen.py | 87 ++++++----- doc/tools/build_modref_templates.py | 53 ++++--- nibabel/tests/test_viewers.py | 2 +- nisext/__init__.py | 3 +- nisext/py3builder.py | 10 +- nisext/sexts.py | 103 ++++++------ nisext/testers.py | 125 +++++++-------- nisext/tests/test_sexts.py | 36 +++-- nisext/tests/test_testers.py | 14 +- setup.py | 2 +- tools/bisect_nose.py | 38 ++--- tools/ci/activate.sh | 1 - tools/dicomfs.wsgi | 25 ++- tools/gitwash_dumper.py | 146 ++++++++++-------- tools/make_tarball.py | 4 +- tools/mpkg_wrapper.py | 2 + tools/prep_zenodo.py | 19 ++- tools/profile | 50 +++--- tools/refresh_readme.py | 2 +- tools/update_requirements.py | 29 ++-- tools/valgrind-python.supp | 2 - 58 files changed, 616 insertions(+), 628 deletions(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index d700b59665..78015d8cf1 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -9,4 +9,4 @@ bf298113da99079c9c7b5e1690e41879828cd472 # Thu Dec 29 21:32:00 2022 -0500 - effigies@gmail.com - STY: isort 0ab2856cac4d4baae7ab3e2f6d58421db55d807f # Thu Dec 29 21:30:29 2022 -0500 - effigies@gmail.com - STY: blue -1a8dd302ff85b1136c81d492509b80e7748339f0 \ No newline at end of file 
+1a8dd302ff85b1136c81d492509b80e7748339f0 diff --git a/Makefile b/Makefile index e168210b13..7d4c6666ae 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ RELEASE_VERSION ?= $(SETUPPY_VERSION) all: build -build: +build: $(PYTHON) setup.py config --noisy $(PYTHON) setup.py build diff --git a/Makefile.win b/Makefile.win index 30f8275311..00c15ea031 100644 --- a/Makefile.win +++ b/Makefile.win @@ -1,6 +1,6 @@ # Makefile NiBabel under Windows using a standard Python distribution -installer: +installer: # now the installer python setup.py bdist_wininst diff --git a/bin/nib-ls b/bin/nib-ls index f2e447d518..067efb0533 100755 --- a/bin/nib-ls +++ b/bin/nib-ls @@ -13,6 +13,5 @@ Output a summary table for neuroimaging files (resolution, dimensionality, etc.) from nibabel.cmdline.ls import main - if __name__ == '__main__': main() diff --git a/bin/nib-nifti-dx b/bin/nib-nifti-dx index b395ee1d9a..2562e0f0d8 100755 --- a/bin/nib-nifti-dx +++ b/bin/nib-nifti-dx @@ -7,7 +7,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Print nifti diagnostics for header files """ +"""Print nifti diagnostics for header files""" from nibabel.cmdline.nifti_dx import main diff --git a/bin/parrec2nii b/bin/parrec2nii index 27a1abca05..4a21c6d288 100755 --- a/bin/parrec2nii +++ b/bin/parrec2nii @@ -4,6 +4,5 @@ from nibabel.cmdline.parrec2nii import main - if __name__ == '__main__': main() diff --git a/doc/source/README.txt b/doc/source/README.txt index 2a3d2647d6..32b5df8c09 100644 --- a/doc/source/README.txt +++ b/doc/source/README.txt @@ -16,7 +16,7 @@ Discover available make targets:: make help Clean up previous build:: - + make clean Build html documentation:: diff --git a/doc/source/_templates/indexsidebar.html b/doc/source/_templates/indexsidebar.html index be655cc5f4..642bae6738 100644 --- a/doc/source/_templates/indexsidebar.html +++ b/doc/source/_templates/indexsidebar.html @@ -19,4 +19,3 @@

Search mailing list archive

- diff --git a/doc/source/conf.py b/doc/source/conf.py index 1e3d298fdc..04ac32483b 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -42,8 +42,9 @@ try: import nibabel except ImportError: - raise RuntimeError('Need nibabel on Python PATH; consider "make htmldoc" ' - 'from nibabel root directory') + raise RuntimeError( + 'Need nibabel on Python PATH; consider "make htmldoc" from nibabel root directory' + ) # -- General configuration ---------------------------------------------------- @@ -55,23 +56,24 @@ fobj.write(rel['long_description']) # Load metadata from setup.cfg -with open(Path("../../pyproject.toml"), 'rb') as fobj: +with open(Path('../../pyproject.toml'), 'rb') as fobj: pyproject = tomllib.load(fobj) -authors = pyproject["project"]["authors"][0] +authors = pyproject['project']['authors'][0] # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.mathjax', - 'sphinx.ext.inheritance_diagram', - 'sphinx.ext.autosummary', - 'texext.math_dollar', # has to go before numpydoc - 'numpydoc', - 'matplotlib.sphinxext.plot_directive', - ] +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx.ext.mathjax', + 'sphinx.ext.inheritance_diagram', + 'sphinx.ext.autosummary', + 'texext.math_dollar', # has to go before numpydoc + 'numpydoc', + 'matplotlib.sphinxext.plot_directive', +] # Autosummary always wants to use a `generated/` directory. # We generate with `make api-stamp` @@ -85,13 +87,13 @@ source_suffix = '.rst' # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. -project = u'NiBabel' +project = 'NiBabel' copyright = f"2006-2022, {authors['name']} <{authors['email']}>" # The version info for the project you're documenting, acts as replacement for @@ -105,11 +107,11 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y, %H:%M PDT' @@ -124,32 +126,32 @@ exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
-#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Sphinxext configuration -------------------------------------------------- # Set attributes for layout of inheritance diagrams -inheritance_graph_attrs = dict(rankdir="LR", size='"6.0, 8.0"', fontsize=14, - ratio='compress') -inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75, - color='dodgerblue1', style='filled') +inheritance_graph_attrs = dict(rankdir='LR', size='"6.0, 8.0"', fontsize=14, ratio='compress') +inheritance_node_attrs = dict( + shape='ellipse', fontsize=14, height=0.75, color='dodgerblue1', style='filled' +) # Flag to show todo items in rendered output todo_include_todos = True @@ -168,26 +170,26 @@ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = '' +# html_title = '' # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -196,31 +198,39 @@ # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # Content template for the index page. html_index = 'index.html' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -html_sidebars = {'index': ['localtoc.html', 'relations.html', 'sourcelink.html', - 'indexsidebar.html', 'searchbox.html', 'reggie.html']} +html_sidebars = { + 'index': [ + 'localtoc.html', + 'relations.html', + 'sourcelink.html', + 'indexsidebar.html', + 'searchbox.html', + 'reggie.html', + ] +} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {'index': 'index.html'} +# html_additional_pages = {'index': 'index.html'} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = True @@ -228,10 +238,10 @@ # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. 
".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'nibabeldoc' @@ -241,34 +251,32 @@ # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, # documentclass [howto/manual]). -latex_documents = [ - ('index', 'nibabel.tex', u'NiBabel Documentation', u'NiBabel Authors', - 'manual')] +latex_documents = [('index', 'nibabel.tex', 'NiBabel Documentation', 'NiBabel Authors', 'manual')] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True # Example configuration for intersphinx: refer to the Python standard library. diff --git a/doc/source/devel/biaps/biap_0001.rst b/doc/source/devel/biaps/biap_0001.rst index 7cf2b5b0ac..659967b549 100644 --- a/doc/source/devel/biaps/biap_0001.rst +++ b/doc/source/devel/biaps/biap_0001.rst @@ -297,4 +297,3 @@ How about the names in the proposal? ``is_proxy``; ``unproxy=True``? .. vim: ft=rst - diff --git a/doc/source/devel/biaps/biap_0003.rst b/doc/source/devel/biaps/biap_0003.rst index 3b4bdad24e..7abb07efc1 100644 --- a/doc/source/devel/biaps/biap_0003.rst +++ b/doc/source/devel/biaps/biap_0003.rst @@ -41,7 +41,7 @@ From `adding nifti extensions`_: * 4 = NIFTI_ECODE_AFNI = AFNI header attributes: The format of the AFNI extension in the NIfTI-1.1 format is described at http://nifti.nimh.nih.gov/nifti-1/AFNIextension1/ -* 6 = NIFTI_ECODE_COMMENT = comment: arbitrary non-NUL ASCII text, with no +* 6 = NIFTI_ECODE_COMMENT = comment: arbitrary non-NUL ASCII text, with no additional structure implied * 8 = NIFTI_ECODE_XCEDE = XCEDE metadata: http://www.nbirn.net/Resources/Users/Applications/xcede/index.htm @@ -369,7 +369,7 @@ apply to the Cartesian product of the image axis values. For example, if the values of ``applies_to`` == ``['slice', 'time']``, and the slice and time axes in the array are lengths (6, 10) respectively, then the values apply to all combinations of the 6 possible values for slice indices and the 10 possible -values for the time indices (ie apply to all 6x10=60 values). The axis metadata +values for the time indices (ie apply to all 6x10=60 values). The axis metadata values in this case can be: * a scalar. The value applies to every combination of (slice, time) diff --git a/doc/source/devel/biaps/biap_0004.rst b/doc/source/devel/biaps/biap_0004.rst index e3149ba711..229025d01a 100644 --- a/doc/source/devel/biaps/biap_0004.rst +++ b/doc/source/devel/biaps/biap_0004.rst @@ -16,9 +16,9 @@ nibabel objects and functions. Motivation ********** -It is very common to convert source DICOM images to another format, typically -Nifti, before doing any image processing. 
The Nifti format is significantly -easier to work with and has wide spread compatibility. However, the vast +It is very common to convert source DICOM images to another format, typically +Nifti, before doing any image processing. The Nifti format is significantly +easier to work with and has wide spread compatibility. However, the vast amount of meta data stored in the source DICOM files will be lost. After implementing this proposal, users will be able to preserve all of the @@ -32,7 +32,7 @@ private elements. The meta data will then be easily accessible through the (256, 256, 24, 8) >>> print nii.get_meta('RepetitionTime') 3500.0 - >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx)) + >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx)) for idx in xrange(data.shape[-1])] >>> print echo_times [16.4, 32.8, 49.2, 65.6, 82.0, 98.4, 114.8, 131.2] @@ -50,25 +50,25 @@ Overview ******** dcmstack reads a series of DICOM images, works out their relationship in terms -of slices and volumes, and compiles them into multidimensional volumes. It can -produce the corresponding data volume and affine, or a Nifti image (with any +of slices and volumes, and compiles them into multidimensional volumes. It can +produce the corresponding data volume and affine, or a Nifti image (with any additional header information set appropriately). -In the course of the read, dcmstack creates a `DcmMeta` object for -each input file. This object is an ordered mapping that can contain a copy -of all the meta data in the DICOM header. By default some filtering is -applied to reduce the chance of including PHI. The set of DcmMeta objects are -then merged together in the same order as the image data to create a single +In the course of the read, dcmstack creates a `DcmMeta` object for +each input file. This object is an ordered mapping that can contain a copy +of all the meta data in the DICOM header. By default some filtering is +applied to reduce the chance of including PHI. The set of DcmMeta objects are +then merged together in the same order as the image data to create a single DcmMeta object that summarizes all of the meta data for the series. -To summarize the meta data, each element is classified based on how the values -repeat (e.g. const, per_slice, per_volume, etc.). Each element has a name (the -keyword from the DICOM standard) and one or more values (the number of values -depends on the classification and the shape of the image). Each classification's +To summarize the meta data, each element is classified based on how the values +repeat (e.g. const, per_slice, per_volume, etc.). Each element has a name (the +keyword from the DICOM standard) and one or more values (the number of values +depends on the classification and the shape of the image). Each classification's meta data is stored stored in a separate nested dictionary. -While creating the Nifti image output, the `DcmMeta` is stored in a -`DcmMetaExtension` which can be added as a header extension. This extension +While creating the Nifti image output, the `DcmMeta` is stored in a +`DcmMetaExtension` which can be added as a header extension. This extension simply does a JSON encoding directly on the `DcmMeta` object. When working with these images, it's possible to keep track of the @@ -78,7 +78,7 @@ slice, and remove information for other slices. Or when merging 3D volumes to a 4D time series, we want to merge together the meta data too. At the moment, dcmstack only creates Nifti images. 
There's no reason that this -should be so, and the relationship of dcmstack to other spatial images should be +should be so, and the relationship of dcmstack to other spatial images should be more flexible. ****** @@ -105,10 +105,10 @@ wrapping the `DcmMeta` in the Extension API? Status ------ -Resolved. We now have a separate `DcmMeta` object which inherits from -`OrderedDict` and contains all of the functionality previously in -`DcmMetaExtension` except those related to acting as a Nifti1Extension. -The `DcmMetaExtension` now provides just the functionality for being +Resolved. We now have a separate `DcmMeta` object which inherits from +`OrderedDict` and contains all of the functionality previously in +`DcmMetaExtension` except those related to acting as a Nifti1Extension. +The `DcmMetaExtension` now provides just the functionality for being a Nifti1Extension. Keeping track of metadata when manipulating images @@ -117,9 +117,9 @@ Keeping track of metadata when manipulating images When slicing images, it is good to be able to keep track of the relevant DICOM metadata for the particular slice. Or when merging images, it is good to be able to compile the metadata across slices into the (e.g) volume metadata. Or, -say, when coregistering an image, it is good to be able to know that the -metadata that is per-slice no longer directly corresponds to a slice of the -data array. +say, when coregistering an image, it is good to be able to know that the +metadata that is per-slice no longer directly corresponds to a slice of the +data array. At the moment, dcmstack deals with this by wrapping the image with DICOM meta information in `NiftiWrapper` object : see @@ -146,8 +146,8 @@ Put the `DcmMeta` data into the `extra` object that is input to the Add a `get_meta` method to `SpatialImage` that uses the to-be-defined API of the `extra` object. Maybe, by default, this would just get keys out of the mapping. -Define an API for the `extra` object to give back metadata that is potentially -varying (per slice or volume). We also need a way to populate the `extra` object +Define an API for the `extra` object to give back metadata that is potentially +varying (per slice or volume). We also need a way to populate the `extra` object when loading an image that has an associated `DcmMeta` object. Use this API to get metadata. Try and make this work with functions outside the @@ -179,54 +179,54 @@ Add `create_dcmmeta` method to the nibabel DICOM wrapper objects, that can be specialized for each known DICOM format variation. Put the rules for slice information etc into each class. -For the Siemens files, we will need to make a list of elements from the private -CSA headers that are known to be slice specific. For the multiframe DICOM files -we should be able to do this in a programmatic manner, since the varying data -should live in the PerFrameFunctionalSequence DICOM element. Each element that -is reclassified should be simplified with the `DcmMeta.simplify` method so that -it can be classified appropriately. +For the Siemens files, we will need to make a list of elements from the private +CSA headers that are known to be slice specific. For the multiframe DICOM files +we should be able to do this in a programmatic manner, since the varying data +should live in the PerFrameFunctionalSequence DICOM element. Each element that +is reclassified should be simplified with the `DcmMeta.simplify` method so that +it can be classified appropriately. 
Meta data in nested DICOM sequences can not be independently classified ======================================================================= -The code for summarizing meta data only works on the top level of key/value -pairs. Any value that is a nested dataset is treated as a single entity, -which prevents us from classifying its individual elements differently. +The code for summarizing meta data only works on the top level of key/value +pairs. Any value that is a nested dataset is treated as a single entity, +which prevents us from classifying its individual elements differently. -In a DICOM data set, any element that is a sequence contains one or more -nested DICOM data sets. For most MRI images this is not an issue since -they rarely contain many sequences, and the ones they do are usually small -and relatively unimportant. However in multiframe DICOM files make heavy +In a DICOM data set, any element that is a sequence contains one or more +nested DICOM data sets. For most MRI images this is not an issue since +they rarely contain many sequences, and the ones they do are usually small +and relatively unimportant. However in multiframe DICOM files make heavy use of nested sequences to store data. Plan ---- -This same issue was solved for the translated Siemens CSA sub headers by -unpacking each nested dataset by joining the keys from each level with a -dotted notation. For example, in the `CsaSeries` subheader there is a nested -`MrPhoenixProtocol` dataset which has an element `ulVersion` so the key we -use after unpacking is `CsaSeries.MrPhoenixProtocol.ulVersion`. +This same issue was solved for the translated Siemens CSA sub headers by +unpacking each nested dataset by joining the keys from each level with a +dotted notation. For example, in the `CsaSeries` subheader there is a nested +`MrPhoenixProtocol` dataset which has an element `ulVersion` so the key we +use after unpacking is `CsaSeries.MrPhoenixProtocol.ulVersion`. -We can take the same approach for DICOM sequence elements. One additional +We can take the same approach for DICOM sequence elements. One additional consideration is that each of these element is actually a list of data sets, so we would need to add an index number to the key somehow. -The alternative is to handle nested data sets recursively in the meta data -summarizing code. This would be fairly complex and you would no longer be -able to refer to each element with a single string, at least not without +The alternative is to handle nested data sets recursively in the meta data +summarizing code. This would be fairly complex and you would no longer be +able to refer to each element with a single string, at least not without some mini-language for traversing the nested datasets. Improving access to varying meta data through the Nifti ======================================================= -Currently, when accessing varying meta data through the `get_meta` method +Currently, when accessing varying meta data through the `get_meta` method you can only get one value at a time:: - >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx)) + >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx)) for idx in xrange(data.shape[-1])] -You can easily get multiple values from the `DcmMeta` object itself, but -then you lose the capability to automatically check if the meta data is +You can easily get multiple values from the `DcmMeta` object itself, but +then you lose the capability to automatically check if the meta data is valid in relation to the current image. 
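The dotted-key unpacking described in the BIAP text above (e.g.
``CsaSeries.MrPhoenixProtocol.ulVersion``) can be sketched in a few
lines of Python. This is a hypothetical illustration rather than
dcmstack's implementation: ``flatten_meta`` assumes the nested data
set has already been read into plain dicts and lists, and gives each
sequence item an index segment in its key::

    def flatten_meta(dataset, prefix=''):
        """Flatten nested dicts/lists into single dotted-notation keys."""
        flat = {}
        for key, value in dataset.items():
            name = prefix + key
            if isinstance(value, dict):
                flat.update(flatten_meta(value, name + '.'))
            elif isinstance(value, list):
                # sequence items pick up an index, e.g. 'Seq.0.EchoTime'
                for idx, item in enumerate(value):
                    if isinstance(item, dict):
                        flat.update(flatten_meta(item, '%s.%d.' % (name, idx)))
                    else:
                        flat['%s.%d' % (name, idx)] = item
            else:
                flat[name] = value
        return flat

For example, ``flatten_meta({'CsaSeries': {'MrPhoenixProtocol':
{'ulVersion': 1}}})`` returns
``{'CsaSeries.MrPhoenixProtocol.ulVersion': 1}``.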
diff --git a/doc/source/devel/register_me.py b/doc/source/devel/register_me.py index 76e0dbc641..017f873abf 100644 --- a/doc/source/devel/register_me.py +++ b/doc/source/devel/register_me.py @@ -1,7 +1,7 @@ -from os.path import join as pjoin, expanduser, abspath, dirname -import sys import configparser as cfp - +import sys +from os.path import abspath, dirname, expanduser +from os.path import join as pjoin if sys.platform == 'win32': HOME_INI = pjoin(expanduser('~'), '_dpkg', 'local.dsource') diff --git a/doc/source/devel/spm_use.rst b/doc/source/devel/spm_use.rst index 56c7051696..8c47cd7f5e 100644 --- a/doc/source/devel/spm_use.rst +++ b/doc/source/devel/spm_use.rst @@ -16,8 +16,8 @@ their call syntax is the same for example. >> fname = 'some_image.nii'; >> vol = spm_vol(fname) % the vol struct - - vol = + + vol = fname: 'some_image.nii' mat: [4x4 double] @@ -37,7 +37,7 @@ their call syntax is the same for example. 0 0 2 -74 0 0 0 1 - >> help spm_vol + >> help spm_vol Get header information etc for images. FORMAT V = spm_vol(P) P - a matrix of filenames. @@ -84,7 +84,7 @@ their call syntax is the same for example. >> vol.private - ans = + ans = NIFTI object: 1-by-1 dat: [91x109x91 file_array] @@ -113,7 +113,7 @@ Images in SPM are always 3D. Note this behavior: >> fname = 'functional_01.nii'; >> vol = spm_vol(fname) - vol = + vol = 191x1 struct array with fields: fname @@ -160,7 +160,7 @@ Some simple ones: >> new_vol.fname = new_fname; >> spm_write_vol(new_vol, img_arr) - ans = + ans = fname: 'another_image.nii' mat: [4x4 double] @@ -262,7 +262,7 @@ world coordinates according to the affine looks like: 53.0000 1.0000 - >> vals = spm_sample_vol(vol, vc(1), vc(2), vc(3), hold_val) + >> vals = spm_sample_vol(vol, vc(1), vc(2), vc(3), hold_val) vals = diff --git a/doc/source/dicom/dcm2nii_algorithms.rst b/doc/source/dicom/dcm2nii_algorithms.rst index 809ac51c51..88cd55dfcd 100644 --- a/doc/source/dicom/dcm2nii_algorithms.rst +++ b/doc/source/dicom/dcm2nii_algorithms.rst @@ -7,7 +7,7 @@ dcm2nii_ is an open source DICOM_ to nifti_ conversion program, written by Chris Rorden, in Delphi (object orientated pascal). It's part of Chris' popular mricron_ collection of programs. The source appears to -be best found on the `mricron NITRC site`_. It's BSD_ licensed. +be best found on the `mricron NITRC site`_. It's BSD_ licensed. .. 
_mricron NITRC site: https://www.nitrc.org/projects/mricron diff --git a/doc/source/dicom/derivations/dicom_mosaic.py b/doc/source/dicom/derivations/dicom_mosaic.py index 074c5491c1..5def2e9490 100644 --- a/doc/source/dicom/derivations/dicom_mosaic.py +++ b/doc/source/dicom/derivations/dicom_mosaic.py @@ -1,22 +1,19 @@ -""" Just showing the mosaic simplification """ +"""Just showing the mosaic simplification""" -from sympy import Matrix, Symbol, symbols, simplify +from sympy import Matrix, Symbol, simplify, symbols def numbered_matrix(nrows, ncols, symbol_prefix): - return Matrix(nrows, ncols, lambda i, j: Symbol( - symbol_prefix + '_{%d%d}' % (i+1, j+1))) + return Matrix(nrows, ncols, lambda i, j: Symbol(symbol_prefix + '_{%d%d}' % (i + 1, j + 1))) def numbered_vector(nrows, symbol_prefix): - return Matrix(nrows, 1, lambda i, j: Symbol( - symbol_prefix + '_{%d}' % (i+1))) + return Matrix(nrows, 1, lambda i, j: Symbol(symbol_prefix + '_{%d}' % (i + 1))) RS = numbered_matrix(3, 3, 'rs') -mdc, mdr, rdc, rdr = symbols( - 'md_{cols} md_{rows} rd_{cols} rd_{rows}') +mdc, mdr, rdc, rdr = symbols('md_{cols} md_{rows} rd_{cols} rd_{rows}') md_adj = Matrix((mdc - 1, mdr - 1, 0)) / -2 rd_adj = Matrix((rdc - 1, rdr - 1, 0)) / -2 @@ -24,8 +21,6 @@ def numbered_vector(nrows, symbol_prefix): adj = -(RS * md_adj) + RS * rd_adj adj.simplify() -Q = RS[:, :2] * Matrix(( - (mdc - rdc) / 2, - (mdr - rdr) / 2)) +Q = RS[:, :2] * Matrix(((mdc - rdc) / 2, (mdr - rdr) / 2)) assert simplify(adj - Q) == Matrix([0, 0, 0]) diff --git a/doc/source/dicom/derivations/spm_dicom_orient.py b/doc/source/dicom/derivations/spm_dicom_orient.py index 936e807ce1..12b3ee99b6 100644 --- a/doc/source/dicom/derivations/spm_dicom_orient.py +++ b/doc/source/dicom/derivations/spm_dicom_orient.py @@ -1,4 +1,4 @@ -""" Symbolic versions of the DICOM orientation mathemeatics. +"""Symbolic versions of the DICOM orientation mathemeatics. Notes on the SPM orientation machinery. @@ -8,20 +8,17 @@ """ import numpy as np - import sympy -from sympy import Matrix, Symbol, symbols, zeros, ones, eye +from sympy import Matrix, Symbol, eye, ones, symbols, zeros # The code below is general (independent of SPMs code) def numbered_matrix(nrows, ncols, symbol_prefix): - return Matrix(nrows, ncols, lambda i, j: Symbol( - symbol_prefix + '_{%d%d}' % (i + 1, j + 1))) + return Matrix(nrows, ncols, lambda i, j: Symbol(symbol_prefix + '_{%d%d}' % (i + 1, j + 1))) def numbered_vector(nrows, symbol_prefix): - return Matrix(nrows, 1, lambda i, j: Symbol( - symbol_prefix + '_{%d}' % (i + 1))) + return Matrix(nrows, 1, lambda i, j: Symbol(symbol_prefix + '_{%d}' % (i + 1))) # premultiplication matrix to go from 0 based to 1 based indexing @@ -46,7 +43,7 @@ def numbered_vector(nrows, symbol_prefix): R = zeros(4, 2) R[:3, :] = R3 -# The following is specific to the SPM algorithm. +# The following is specific to the SPM algorithm. x1 = ones(4, 1) y1 = ones(4, 1) y1[:3, :] = pos_pat_0 diff --git a/doc/source/dicom/dicom_fields.rst b/doc/source/dicom/dicom_fields.rst index 5d2af15739..f7d2ab8490 100644 --- a/doc/source/dicom/dicom_fields.rst +++ b/doc/source/dicom/dicom_fields.rst @@ -15,7 +15,7 @@ because we've covered those somewhat in :ref:`dicom-orientation`. Fields for ordering DICOM files into images =========================================== -You'll see some discussion of this in :ref:`spm-dicom`. +You'll see some discussion of this in :ref:`spm-dicom`. 
Section 7.3.1: general series module diff --git a/doc/source/dicom/dicom_info.rst b/doc/source/dicom/dicom_info.rst index a1173073fe..b9883d4bfc 100644 --- a/doc/source/dicom/dicom_info.rst +++ b/doc/source/dicom/dicom_info.rst @@ -47,5 +47,5 @@ Here is a selected list of other tools and relevant resources: * http://www.barre.nom.fr/medical/samples/ * http://pubimage.hcuge.ch:8080/ * Via links from the dcm2nii_ page. - + .. include:: ../links_names.txt diff --git a/doc/source/dicom/dicom_mosaic.rst b/doc/source/dicom/dicom_mosaic.rst index 5ff0f1fcf7..789247f3ff 100644 --- a/doc/source/dicom/dicom_mosaic.rst +++ b/doc/source/dicom/dicom_mosaic.rst @@ -17,7 +17,7 @@ with something like:: import dicom dcm_data = dicom.read_file('my_file.dcm') plt.imshow(dcm_data.pixel_array) - + .. image:: mosaic_grid.png Getting the slices from the mosaic @@ -83,7 +83,7 @@ rd_{cols})$ and the mosaic dimensions are $(md_{rows}, md_{cols})$. The .. math:: - \mathbf{i} = \mathbf{c} + RS + \mathbf{i} = \mathbf{c} + RS \begin{bmatrix} -(md_{rows}-1) / 2\\ -(md_{cols}-1) / 2\\ 0 \end{bmatrix} @@ -94,7 +94,7 @@ the true image position $\mathbf{t}$: .. math:: - \mathbf{t} = \mathbf{i} - + \mathbf{t} = \mathbf{i} - (RS \begin{bmatrix} -(md_{rows}-1) / 2\\ -(md_{cols}-1) / 2\\ 0 \end{bmatrix}) + @@ -106,11 +106,11 @@ Because of the final zero in the voxel translations, this simplifies to: .. math:: - \mathbf{t} = \mathbf{i} + + \mathbf{t} = \mathbf{i} + Q \begin{bmatrix} (md_{rows} - rd_{rowss}) / 2 \\ (md_{cols} - rd_{cols}) / 2 \end{bmatrix} -where: +where: .. math:: diff --git a/doc/source/dicom/dicom_orientation.rst b/doc/source/dicom/dicom_orientation.rst index dae0ea5c60..275b16ce78 100644 --- a/doc/source/dicom/dicom_orientation.rst +++ b/doc/source/dicom/dicom_orientation.rst @@ -17,7 +17,7 @@ definitions`_ (2009): patient. The y-axis is increasing to the posterior side of the patient. The z-axis is increasing toward the head of the patient. -(we'll ignore the quadupeds for now). +(we'll ignore the quadupeds for now). In a way it's funny to call this the 'patient-based' coordinate system. 'Doctor-based coordinate system' is a better name. Think of a doctor @@ -33,7 +33,7 @@ patient. DICOM pixel data ================ -C.7.6.3.1.4 - Pixel Data +C.7.6.3.1.4 - Pixel Data Pixel Data (7FE0,0010) for this image. The order of pixels sent for each image plane is left to right, top to bottom, i.e., the upper left pixel (labeled 1,1) is sent first followed by the remainder of @@ -110,21 +110,21 @@ system* - see `DICOM object definitions`_ section 3.17.1): \begin{bmatrix} P_x\\ P_y\\ P_z\\ - 1 \end{bmatrix} = - \begin{bmatrix} X_x\Delta{i} & Y_x\Delta{j} & 0 & S_x \\ + 1 \end{bmatrix} = + \begin{bmatrix} X_x\Delta{i} & Y_x\Delta{j} & 0 & S_x \\ X_y\Delta{i} & Y_y\Delta{j} & 0 & S_y \\ X_z\Delta{i} & Y_z\Delta{j} & 0 & S_z \\ 0 & 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} i\\ j\\ 0\\ - 1 \end{bmatrix} - = M + 1 \end{bmatrix} + = M \begin{bmatrix} i\\ j\\ 0\\ - 1 \end{bmatrix} - + 1 \end{bmatrix} + Where: #. 
$P_{xyz}$ : The coordinates of the voxel (i,j) in the frame's @@ -207,20 +207,20 @@ DICOM affine formula \begin{bmatrix} P_x\\ P_y\\ P_z\\ - 1 \end{bmatrix} = - \begin{bmatrix} F_{11}\Delta{r} & F_{12}\Delta{c} & 0 & S_x \\ + 1 \end{bmatrix} = + \begin{bmatrix} F_{11}\Delta{r} & F_{12}\Delta{c} & 0 & S_x \\ F_{21}\Delta{r} & F_{22}\Delta{c} & 0 & S_y \\ F_{31}\Delta{r} & F_{32}\Delta{c} & 0 & S_z \\ 0 & 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} r\\ c\\ 0\\ - 1 \end{bmatrix} - = A + 1 \end{bmatrix} + = A \begin{bmatrix} r\\ c\\ 0\\ - 1 \end{bmatrix} + 1 \end{bmatrix} Where: @@ -258,7 +258,7 @@ In the *multi slice* case, we can assume that the 'ImageOrientationPatient' field is the same for all the slices. We want to get the affine transformation matrix $A$ that maps from voxel -coordinates in the DICOM file(s), to mm in the :ref:`dicom-pcs`. +coordinates in the DICOM file(s), to mm in the :ref:`dicom-pcs`. By voxel coordinates, we mean coordinates of form $(r, c, s)$ - the row, column and slice indices - as for the :ref:`dicom-slice-affine`. @@ -305,7 +305,7 @@ For the multi-slice case, we can fill in $\mathbf{k}$ by using the information from $T^N$, because $T^N$ is the translation needed to take the first voxel in the last (slice index = $N-1$) slice to mm space. So: -.. math:: +.. math:: \left(\begin{smallmatrix}T^N\\1\end{smallmatrix}\right) = A \left(\begin{smallmatrix}0\\0\\N - 1\\1\end{smallmatrix}\right) @@ -325,7 +325,7 @@ and therefore: .. math:: A_{multi} = \left(\begin{smallmatrix}F_{{11}} \Delta{r} & F_{{12}} \Delta{c} & \frac{T^{N}_{{1}} - T^{1}_{{1}}}{N - 1} & T^{1}_{{1}}\\F_{{21}} \Delta{r} & F_{{22}} \Delta{c} & \frac{T^{N}_{{2}} - T^{1}_{{2}}}{N - 1} & T^{1}_{{2}}\\F_{{31}} \Delta{r} & F_{{32}} \Delta{c} & \frac{T^{N}_{{3}} - T^{1}_{{3}}}{N - 1} & T^{1}_{{3}}\\0 & 0 & 0 & 1\end{smallmatrix}\right) - + A_{single} = \left(\begin{smallmatrix}F_{{11}} \Delta{r} & F_{{12}} \Delta{c} & \Delta{s} n_{{1}} & T^{1}_{{1}}\\F_{{21}} \Delta{r} & F_{{22}} \Delta{c} & \Delta{s} n_{{2}} & T^{1}_{{2}}\\F_{{31}} \Delta{r} & F_{{32}} \Delta{c} & \Delta{s} n_{{3}} & T^{1}_{{3}}\\0 & 0 & 0 & 1\end{smallmatrix}\right) See :download:`derivations/spm_dicom_orient.py` for the derivations and @@ -410,4 +410,3 @@ plus a constant. Again, see :download:`derivations/spm_dicom_orient.py` for the derivations. .. include:: ../links_names.txt - diff --git a/doc/source/dicom/siemens_csa.rst b/doc/source/dicom/siemens_csa.rst index 7807f7b89f..9beec6150a 100644 --- a/doc/source/dicom/siemens_csa.rst +++ b/doc/source/dicom/siemens_csa.rst @@ -12,7 +12,7 @@ header. We'll call this the *CSA header*. CSA header ========== -See this Siemens `Syngo DICOM conformance`_ statement, and a GDCM_ +See this Siemens `Syngo DICOM conformance`_ statement, and a GDCM_ `Siemens header dump`_. .. _`Siemens header dump`: http://sourceforge.net/apps/mediawiki/gdcm/index.php?title=Gdcmdump#SIEMENS_CSA_Header @@ -38,7 +38,7 @@ same format. The fields can be of two types, CSA1 and CSA2. Both are always little-endian, whatever the machine endian is. The CSA2 format begins with the string 'SV10', the CSA1 format does -not. +not. The code below keeps track of the position *within the CSA header stream*. We'll call this ``csa_position``. 
At this point (after @@ -81,14 +81,14 @@ At this point SPM does a check, by calculating the length of this item If ``item_len`` is less than 0 or greater than ``csa_max_pos-csa_position`` (the remaining number of bytes to read in the whole header) then we break from the item reading loop, -setting the value below to ''. +setting the value below to ''. Then we calculate ``item_len`` rounded up to the nearest 4 byte boundary -tp get ``next_item_pos``. +tp get ``next_item_pos``. -2. value : uint8, ``item_len``. +2. value : uint8, ``item_len``. -We set the stream position to ``next_item_pos``. +We set the stream position to ``next_item_pos``. CSA2 ==== @@ -126,10 +126,10 @@ Now there's a different length check from CSA1. ``item_len`` is given just by ``xx[1]``. If ``item_len`` > ``csa_max_pos - csa_position`` (the remaining bytes in the header), then we just read the remaining bytes in the header (as above) into ``value`` below, as uint8, move the -filepointer to the next 4 byte boundary, and give up reading. +filepointer to the next 4 byte boundary, and give up reading. -2. value : uint8, ``item_len``. +2. value : uint8, ``item_len``. -We set the stream position to the next 4 byte boundary. +We set the stream position to the next 4 byte boundary. .. include:: ../links_names.txt diff --git a/doc/source/dicom/spm_dicom.rst b/doc/source/dicom/spm_dicom.rst index 67b6bcf0ca..5b0deb1672 100644 --- a/doc/source/dicom/spm_dicom.rst +++ b/doc/source/dicom/spm_dicom.rst @@ -5,7 +5,7 @@ ====================== These are some notes on the algorithms that SPM_ uses to convert from -DICOM_ to nifti_. There are other notes in :ref:`dicom-mosaic`. +DICOM_ to nifti_. There are other notes in :ref:`dicom-mosaic`. The relevant SPM files are ``spm_dicom_headers.m``, ``spm_dicom_dict.mat`` and ``spm_dicom_convert.m``. These notes refer @@ -29,7 +29,7 @@ written by John Ahsburner (JA). Relevant fixes are: File opening ------------ -When opening the DICOM file, SPM (subfunction ``readdicomfile``) +When opening the DICOM file, SPM (subfunction ``readdicomfile``) #. opens as little endian #. reads 4 characters starting at pos 128 @@ -76,7 +76,7 @@ explicit (as in 'explicit little endian'): There's a check for not-even tag length. If not even: -#. 4294967295 appears to be OK - and decoded as Inf for tag length. +#. 4294967295 appears to be OK - and decoded as Inf for tag length. #. 13 appears to mean 10 and is reset to be 10 #. Any other odd number is not valid and gives a tag length of 0 @@ -89,7 +89,7 @@ tag length of 13 set to tag length 10. ``spm_dicom_convert.m`` ======================= -Written by John Ashburner and Jesper Andersson. +Written by John Ashburner and Jesper Andersson. File categorization ------------------- @@ -97,7 +97,7 @@ File categorization SPM makes a special case of Siemens 'spectroscopy images'. These are images that have 'SOPClassUID' == '1.3.12.2.1107.5.9.1' and the private tag of (29, 1210); for these it pulls out the affine, and writes a -volume of ones corresponding to the acquisition planes. +volume of ones corresponding to the acquisition planes. For images that are not spectroscopy: @@ -111,7 +111,7 @@ For images that are not spectroscopy: * Fields 'SeriesNumber', 'AcquisitionNumber' and 'InstanceNumber' are set to 1 if absent. -Next SPM distinguishes between :ref:`dicom-mosaic` and standard DICOM. +Next SPM distinguishes between :ref:`dicom-mosaic` and standard DICOM. 
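The CSA2 item-reading loop described in the Siemens CSA notes above translates almost directly into code. Below is a minimal sketch, not SPM's or nibabel's implementation: ``read_csa2_items`` is a name invented here, ``stream`` is assumed to be a little-endian byte stream whose position 0 is the start of the CSA header, and ``csa_max_pos`` is the total header length, as above::

    import struct

    def read_csa2_items(stream, n_items, csa_max_pos):
        """Sketch of CSA2 item reading: 4 int32s, item bytes, 4-byte align"""
        items = []
        for _ in range(n_items):
            xx = struct.unpack('<4i', stream.read(16))
            item_len = xx[1]
            if item_len > csa_max_pos - stream.tell():
                # Item claims to be longer than the remaining header:
                # read what is left and give up, as described above
                items.append(stream.read(csa_max_pos - stream.tell()))
                break
            items.append(stream.read(item_len))
            # Set the stream position to the next 4-byte boundary
            plus4 = item_len % 4
            if plus4 != 0:
                stream.seek(4 - plus4, 1)
        return items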
Mosaic images are those with the Siemens private tag:: @@ -140,7 +140,7 @@ Take first header, put as start of first volume. For each subsequent header: field in form of 9 integers separated by '_', where 'X' in this string replaced by '-1' - giving 'ICE1' -Then, for each currently identified volume: +Then, for each currently identified volume: #. If we have ICE1 above, and we do have 'CSAIMageHeaderInfo', with a 'name', in the first header in this volume, then extract ICE dims in @@ -180,7 +180,7 @@ For each volume: #. For each header in this volume, get the z coordinate by taking the dot product of the 'ImagePositionPatient' vector and ``z_dir_cos`` (see :ref:`dicom-z-from-slice`). -#. Sort the headers according to this estimated z coordinate. +#. Sort the headers according to this estimated z coordinate. #. If this volume is more than one slice, and there are any slices with the same z coordinate (as defined above), run the :ref:`dicom-img-resort` on this volume - on the basis that it may @@ -214,7 +214,7 @@ that the routine is still working on - ``work_list``. for making filenames. #. Calculate the z coordinate as for :ref:`spm-second-pass`, for each DICOM header. -#. Sort the headers by 'InstanceNumber' +#. Sort the headers by 'InstanceNumber' #. If any headers have the same 'InstanceNumber', then discard all but the first header with the same number. At this point the remaining headers in ``work_list`` will have different 'InstanceNumber's, but @@ -222,7 +222,7 @@ that the routine is still working on - ``work_list``. #. Now sort by z coordinate #. If there are ``N`` headers, make a ``N`` length vector of flags ``is_processed``, for which all values == False -#. Make an output list of header lists, call it ``hdr_vol_out``, set to empty. +#. Make an output list of header lists, call it ``hdr_vol_out``, set to empty. #. While there are still any False elements in ``is_processed``: #. Find first header for which corresponding ``is_processed`` is @@ -236,7 +236,7 @@ that the routine is still working on - ``work_list``. corresponding to ``zsind`` to ``hdr_vol_out[i]``. This assumes that the original ``work_list`` contained two or more volumes, each with an identical set of z coordinates. - #. Set corresponding ``is_processed`` flag to True for all ``z_same_indices``. + #. Set corresponding ``is_processed`` flag to True for all ``z_same_indices``. #. Finally, if the headers in ``work_list`` have 'InstanceNumber's that cannot be sorted to a sequence ascending in units of 1, or if any @@ -269,7 +269,7 @@ Then define the following matrices: .. math:: R = \left(\begin{smallmatrix}1 & a & 1 & 0\\1 & b & 0 & 1\\1 & c & 0 & 0\\1 & d & 0 & 0\end{smallmatrix}\right) - + L = \left(\begin{smallmatrix}T^{1}_{{1}} & e & F_{{11}} \Delta{r} & F_{{12}} \Delta{c}\\T^{1}_{{2}} & f & F_{{21}} \Delta{r} & F_{{22}} \Delta{c}\\T^{1}_{{3}} & g & F_{{31}} \Delta{r} & F_{{32}} \Delta{c}\\1 & h & 0 & 0\end{smallmatrix}\right) For a volume with more than one slice (header), then $a=1; b=1, c=N, d=1$. 
$e, f, g$ are the values from $T^N$, diff --git a/doc/source/gitwash/configure_git.rst b/doc/source/gitwash/configure_git.rst index 9911d7cbb1..a19f592bd5 100644 --- a/doc/source/gitwash/configure_git.rst +++ b/doc/source/gitwash/configure_git.rst @@ -138,19 +138,19 @@ and it gives graph / text output something like this (but with color!):: * 6d8e1ee - (HEAD, origin/my-fancy-feature, my-fancy-feature) NF - a fancy file (45 minutes ago) [Matthew Brett] * d304a73 - (origin/placeholder, placeholder) Merge pull request #48 from hhuuggoo/master (2 weeks ago) [Jonathan Terhorst] - |\ + |\ | * 4aff2a8 - fixed bug 35, and added a test in test_bugfixes (2 weeks ago) [Hugo] - |/ + |/ * a7ff2e5 - Added notes on discussion/proposal made during Data Array Summit. (2 weeks ago) [Corran Webster] * 68f6752 - Initial implementation of AxisIndexer - uses 'index_by' which needs to be changed to a call on an Axes object - this is all very sketchy right now. (2 weeks ago) [Corr * 376adbd - Merge pull request #46 from terhorst/master (2 weeks ago) [Jonathan Terhorst] - |\ + |\ | * b605216 - updated joshu example to current api (3 weeks ago) [Jonathan Terhorst] | * 2e991e8 - add testing for outer ufunc (3 weeks ago) [Jonathan Terhorst] | * 7beda5a - prevent axis from throwing an exception if testing equality with non-axis object (3 weeks ago) [Jonathan Terhorst] | * 65af65e - convert unit testing code to assertions (3 weeks ago) [Jonathan Terhorst] | * 956fbab - Merge remote-tracking branch 'upstream/master' (3 weeks ago) [Jonathan Terhorst] - | |\ + | |\ | |/ Thanks to Yury V. Zaytsev for posting it. diff --git a/doc/source/gitwash/development_workflow.rst b/doc/source/gitwash/development_workflow.rst index b89db449ba..7c117cfcce 100644 --- a/doc/source/gitwash/development_workflow.rst +++ b/doc/source/gitwash/development_workflow.rst @@ -22,7 +22,7 @@ In what follows we'll refer to the upstream nibabel ``master`` branch, as * Name your branch for the purpose of the changes - e.g. ``bugfix-for-issue-14`` or ``refactor-database-code``. * If you can possibly avoid it, avoid merging trunk or any other branches into - your feature branch while you are working. + your feature branch while you are working. * If you do find yourself merging from trunk, consider :ref:`rebase-on-trunk` * Ask on the `nibabel mailing list`_ if you get stuck. * Ask for code review! diff --git a/doc/source/gitwash/forking_hell.rst b/doc/source/gitwash/forking_hell.rst index d70b28ffdf..1dd14f4618 100644 --- a/doc/source/gitwash/forking_hell.rst +++ b/doc/source/gitwash/forking_hell.rst @@ -30,4 +30,3 @@ Create your own forked copy of nibabel_ should find yourself at the home page for your own forked copy of nibabel_. .. include:: links.inc - diff --git a/doc/source/gitwash/git_resources.rst b/doc/source/gitwash/git_resources.rst index ba7b275e05..d18b0ef48b 100644 --- a/doc/source/gitwash/git_resources.rst +++ b/doc/source/gitwash/git_resources.rst @@ -9,9 +9,9 @@ Tutorials and summaries * `github help`_ has an excellent series of how-to guides. * `learn.github`_ has an excellent series of tutorials -* The `pro git book`_ is a good in-depth book on git. +* The `pro git book`_ is a good in-depth book on git. * A `git cheat sheet`_ is a page giving summaries of common commands. 
-* The `git user manual`_ +* The `git user manual`_ * The `git tutorial`_ * The `git community book`_ * `git ready`_ |emdash| a nice series of tutorials diff --git a/doc/source/gitwash/index.rst b/doc/source/gitwash/index.rst index 4eae7b7008..9dcc741fbc 100644 --- a/doc/source/gitwash/index.rst +++ b/doc/source/gitwash/index.rst @@ -14,5 +14,3 @@ Contents: patching git_development git_resources - - diff --git a/doc/source/gitwash/set_up_fork.rst b/doc/source/gitwash/set_up_fork.rst index bbdb43fe8d..c4fb086bf0 100644 --- a/doc/source/gitwash/set_up_fork.rst +++ b/doc/source/gitwash/set_up_fork.rst @@ -65,4 +65,3 @@ Just for your own satisfaction, show yourself that you now have a new origin git@github.com:your-user-name/nibabel.git (push) .. include:: links.inc - diff --git a/doc/source/installing_data.rst b/doc/source/installing_data.rst index c1b335fd02..ce32de2375 100644 --- a/doc/source/installing_data.rst +++ b/doc/source/installing_data.rst @@ -10,7 +10,7 @@ packages for some of the DICOM tests in nibabel. There are also data packages for standard template images, and other packages for components of nipy, including the main nipy package. -For more details on data package design, see :ref:`data-package-design`. +For more details on data package design, see :ref:`data-package-design`. We haven't yet made a nice automated way of downloading and installing the packages. For the moment you can find packages for the data and template files @@ -78,4 +78,3 @@ with contents:: [DATA] c:\some\path\share\nipy - diff --git a/doc/source/notebooks/ata_error.ipynb b/doc/source/notebooks/ata_error.ipynb index 216f754161..5a26ed0f98 100644 --- a/doc/source/notebooks/ata_error.ipynb +++ b/doc/source/notebooks/ata_error.ipynb @@ -248,4 +248,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/doc/source/notebooks/cross_product_error.ipynb b/doc/source/notebooks/cross_product_error.ipynb index 1889c3be7d..bcf8c23d36 100644 --- a/doc/source/notebooks/cross_product_error.ipynb +++ b/doc/source/notebooks/cross_product_error.ipynb @@ -151,4 +151,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/doc/source/old/design.txt b/doc/source/old/design.txt index f2d30ddf56..35901977b5 100644 --- a/doc/source/old/design.txt +++ b/doc/source/old/design.txt @@ -46,7 +46,7 @@ by the image itself. frame; for reading, we just cast up to a dtype that can hold all the frame dtypes; for writing, we may just write as one type, or disallow writing altogether. -* array shape - ``shape``. +* array shape - ``shape``. * byte offset - ``offset`` at which data starts. This is not relevant for the way we currently read MINC files for example - and may not be relevant for ECAT files, in the sense that it may be the offset to @@ -67,7 +67,7 @@ We think of an image as being the association of: .. note:: - Why are the first three dimensions spatial? + Why are the first three dimensions spatial? For simplicity, we want the transformation (above) to be spatial. 
Because the images are always at least 3D, and the transform is @@ -105,7 +105,7 @@ image format API - see :ref:`image-formats` This immediately suggests the following interface:: - img = Image(data, affine=None, output_space=None, + img = Image(data, affine=None, output_space=None, meta=None, format=None, filename=None) The output space is a string @@ -123,12 +123,12 @@ might imagine these methods:: img.load(filename, format=None) # class method img.save(filename=None, format=None) img.as_file(filemaker=None, format=None) - + and some things that formats generally support like:: img.write_header(filename=None) img.write_data(data=None, filename=None, slicedef=None) - + ``img.as_file`` returns the image as saved to disk; the image might completely correspond to something on disk, in which case it may return its own filename, or it might not correspond to something on disk, in @@ -192,12 +192,12 @@ SPM Analyze adds an optional extra data file in Matlab ``.mat`` format:: some_image.mat Of course there are rules / rules-of-thumb as to what extensions these -various filenames can be. +various filenames can be. We may want to associate an image with a filename or set of filenames. But we may also want to be able to associate images with file-like objects, such as open files, or anything else that implements a file -protocol. +protocol. The image ``format`` will know what the ``image`` needs in terms of files. For example, a single file NIfTI image will need a single @@ -208,7 +208,7 @@ Let's call a full specification of what the format needs a *filedef*. For the moment, let's imagine that is a dictionary with keys ``image``, ``header``, and optional ``mat``. The values can be filenames or file-like objects. A *filespec* is some argument or set of arguments -that allow us to fully specify a *filedef*. +that allow us to fully specify a *filedef*. The simple case of a single-file NIfTI image:: @@ -221,7 +221,7 @@ tries to work out the format from the filespec. Consider:: - img = Image(data, filespec='some_image.nii', + img = Image(data, filespec='some_image.nii', format=Nifti1SingleFormat) also OK. But:: @@ -255,7 +255,3 @@ might raise an error, on the lines of:: - or it might just assume that you mean for the image and the header to be the same file. Perhaps that is too implicit. - - - - diff --git a/doc/source/old/format_design.txt b/doc/source/old/format_design.txt index ad0d23e40d..29585866a9 100644 --- a/doc/source/old/format_design.txt +++ b/doc/source/old/format_design.txt @@ -1,4 +1,4 @@ -.. -*- rst -*- +.. -*- rst -*- .. _image-formats: @@ -15,7 +15,7 @@ diagram Image class plays the role of the Abstraction, and the Format plays the role of the implementor. -The Format object provides an interface to the underlying file format. +The Format object provides an interface to the underlying file format. The Image has the following methods: @@ -107,6 +107,3 @@ format-specific tasks:: fmt.set_sform(np.eye(4) * 2) fmt.fields['descrip'] = 'some information' fmt.to_filename('another_file.nii') - - - diff --git a/doc/source/old/orientation.txt b/doc/source/old/orientation.txt index e74d65517f..b44a11e309 100644 --- a/doc/source/old/orientation.txt +++ b/doc/source/old/orientation.txt @@ -135,4 +135,3 @@ left-right by default. We chose this break from the standard because that is what SPM does with non-affine niftis, and because it seemed more sensible, and because it's more consistent with what we do with SPM non-nifti images (not surprisingly). 
- diff --git a/doc/source/scripts/make_coord_examples.py b/doc/source/scripts/make_coord_examples.py index f763b28c28..aa83fbcd84 100644 --- a/doc/source/scripts/make_coord_examples.py +++ b/doc/source/scripts/make_coord_examples.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -""" Make graphics and example image for coordinate tutorial +"""Make graphics and example image for coordinate tutorial Expects MNI nonlinear template t1 and t2 images in directory of script - specifically these files: @@ -18,16 +18,15 @@ import math +import matplotlib.pyplot as plt +import nipy +import nipy.algorithms.resample as rsm +import nipy.core.api as nca import numpy as np import numpy.linalg as npl import nibabel.eulerangles as euler -import nipy -import nipy.core.api as nca -import nipy.algorithms.resample as rsm -import matplotlib.pyplot as plt - T1_IMG = 'mni_icbm152_t1_tal_nlin_asym_09a.nii' T2_IMG = 'mni_icbm152_t2_tal_nlin_asym_09a.nii' @@ -36,7 +35,7 @@ img = nipy.load_image(img_fname) # Set affine as for FOV, not AC RZS = img.affine[:3, :3] - vox_fov_center = -(np.array(img.shape) - 1) / 2. + vox_fov_center = -(np.array(img.shape) - 1) / 2.0 T = RZS.dot(vox_fov_center) img.affine[:3, 3] = T # Take stuff off the top of the full image, to emphasize FOV @@ -63,18 +62,18 @@ epi_br = np.array((92, 70)) * 2 epi_tl = np.array((7, 63)) * 2 # Find lengths of sides -epi_y_len = np.sqrt((np.subtract(epi_bl, epi_tl)**2).sum()) -epi_x_len = np.sqrt((np.subtract(epi_bl, epi_br)**2).sum()) +epi_y_len = np.sqrt((np.subtract(epi_bl, epi_tl) ** 2).sum()) +epi_x_len = np.sqrt((np.subtract(epi_bl, epi_br) ** 2).sum()) x, y = 0, 1 # Make a rectangular box with these sides + def make_ortho_box(bl, x_len, y_len): - """ Make a box with sides parallel to the axes - """ - return np.array((bl, - [bl[x] + x_len, bl[y]], - [bl[x], bl[y] + y_len], - [bl[x] + x_len, bl[y] + y_len])) + """Make a box with sides parallel to the axes""" + return np.array( + (bl, [bl[x] + x_len, bl[y]], [bl[x], bl[y] + y_len], [bl[x] + x_len, bl[y] + y_len]) + ) + orth_epi_box = make_ortho_box(epi_bl, epi_x_len, epi_y_len) @@ -86,8 +85,7 @@ def make_ortho_box(bl, x_len, y_len): def plot_line(pt1, pt2, fmt='r-', label=None): - plt.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], fmt, - label=label) + plt.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], fmt, label=label) def plot_box(box_def, fmt='r-', label=None): @@ -103,22 +101,18 @@ def rotate_box(box_def, angle, origin): box_def_zeroed = box_def - origin cost = math.cos(angle) sint = math.sin(angle) - rot_array = np.array([[cost, -sint], - [sint, cost]]) + rot_array = np.array([[cost, -sint], [sint, cost]]) box_def_zeroed = np.dot(rot_array, box_def_zeroed.T).T return box_def_zeroed + origin def labeled_point(pt, marker, text, markersize=10, color='k'): plt.plot(pt[0], pt[1], marker, markersize=markersize) - plt.text(pt[0] + markersize / 2, - pt[1] - markersize / 2, - text, - color=color) + plt.text(pt[0] + markersize / 2, pt[1] - markersize / 2, text, color=color) def plot_localizer(): - plt.imshow(sagittal, cmap="gray", origin='lower', extent=sag_extents) + plt.imshow(sagittal, cmap='gray', origin='lower', extent=sag_extents) plt.xlabel('mm from isocenter') plt.ylabel('mm from isocenter') @@ -126,8 +120,10 @@ def plot_localizer(): def save_plot(): # Plot using global variables plot_localizer() + def vx2mm(pts): return pts - iso_center + plot_box(vx2mm(rot_box), label='EPI bounding box') plot_box(vx2mm(anat_box), 'b-', label='Structural bounding box') labeled_point(vx2mm(epi_center), 'ro', 'EPI FOV center') @@ -145,7 
+141,7 @@ def vx2mm(pts): anat_center = np.mean(anat_box, axis=0) # y axis on the plot is first axis of image sag_y, sag_x = sagittal.shape -iso_center = (np.array([sag_x, sag_y]) - 1) / 2. +iso_center = (np.array([sag_x, sag_y]) - 1) / 2.0 sag_extents = [-iso_center[0], iso_center[0], -iso_center[1], iso_center[1]] # Back to image coordinates @@ -155,7 +151,7 @@ def vx2mm(pts): rot = np.eye(4) rot[:3, :3] = euler.euler2mat(0, 0, -angle) # downsample to make smaller output image -downsamp = 1/3 +downsamp = 1 / 3 epi_scale = np.diag([downsamp, downsamp, downsamp, 1]) # template voxels to epi box image voxels vox2epi_vox = epi_scale.dot(rot.dot(epi_trans)) @@ -165,8 +161,7 @@ def vx2mm(pts): epi_vox_shape = np.array([data.shape[0], epi_x_len, epi_y_len]) * downsamp # Make sure dimensions are odd by rounding up or down # This makes the voxel center an integer index, which is convenient -epi_vox_shape = [np.floor(d) if np.floor(d) % 2 else np.ceil(d) - for d in epi_vox_shape] +epi_vox_shape = [np.floor(d) if np.floor(d) % 2 else np.ceil(d) for d in epi_vox_shape] # resample, preserving affine epi_cmap = nca.vox2mni(epi_vox2mm) epi = rsm.resample(t2_img, epi_cmap, np.eye(4), epi_vox_shape) @@ -178,8 +173,7 @@ def vx2mm(pts): anat_trans[:3, 3] = -np.array([0, anat_box[0, 0], anat_box[0, 1]]) vox2anat_vox = anat_scale.dot(anat_trans) anat_vox2mm = t1_img.affine.dot(npl.inv(vox2anat_vox)) -anat_vox_shape = np.round(np.divide( - [data.shape[0], anat_x_len, anat_y_len], anat_vox_sizes)) +anat_vox_shape = np.round(np.divide([data.shape[0], anat_x_len, anat_y_len], anat_vox_sizes)) anat_cmap = nca.vox2mni(anat_vox2mm) anat = rsm.resample(t1_img, anat_cmap, np.eye(4), anat_vox_shape) anat_data = anat.get_fdata() diff --git a/doc/tools/LICENSE.txt b/doc/tools/LICENSE.txt index 9e1d415af8..50431cd88e 100644 --- a/doc/tools/LICENSE.txt +++ b/doc/tools/LICENSE.txt @@ -4,4 +4,3 @@ https://www.mail-archive.com/sphinx-dev@googlegroups.com/msg02472.html and were released under a BSD/MIT license by Fernando Perez, Matthew Brett and the PyMVPA folks. Further cleanups by the scikit-image crew. 
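One detail of ``make_coord_examples.py`` worth pausing on is the rounding of the EPI voxel shape to odd integers, so that the volume center lands on an integer voxel index. A standalone sketch of the same expression, with ``round_to_odd`` as a name invented here::

    import numpy as np

    def round_to_odd(dims):
        # Floor when the floored value is odd, otherwise ceil; every
        # result is an odd integer, so (d - 1) / 2 is a whole number
        return [np.floor(d) if np.floor(d) % 2 else np.ceil(d) for d in dims]

    round_to_odd([20.5, 33.0, 14.2])  # gives [21.0, 33.0, 15.0]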
- diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py index 68d8f68749..3167362643 100644 --- a/doc/tools/apigen.py +++ b/doc/tools/apigen.py @@ -21,7 +21,6 @@ import os import re from inspect import getmodule - from types import BuiltinFunctionType, FunctionType # suppress print statements (warnings for empty files) @@ -29,20 +28,21 @@ class ApiDocWriter: - """ Class for automatic detection and parsing of API docs + """Class for automatic detection and parsing of API docs to Sphinx-parsable reST format""" # only separating first two levels rst_section_levels = ['*', '=', '-', '~', '^'] - def __init__(self, - package_name, - rst_extension='.txt', - package_skip_patterns=None, - module_skip_patterns=None, - other_defines=True - ): - r""" Initialize package for parsing + def __init__( + self, + package_name, + rst_extension='.txt', + package_skip_patterns=None, + module_skip_patterns=None, + other_defines=True, + ): + r"""Initialize package for parsing Parameters ---------- @@ -85,7 +85,7 @@ def get_package_name(self): return self._package_name def set_package_name(self, package_name): - """ Set package_name + """Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx @@ -102,11 +102,10 @@ def set_package_name(self, package_name): self.root_path = root_module.__path__[-1] self.written_modules = None - package_name = property(get_package_name, set_package_name, None, - 'get/set package_name') + package_name = property(get_package_name, set_package_name, None, 'get/set package_name') def _import(self, name): - """ Import namespace package """ + """Import namespace package""" mod = __import__(name) components = name.split('.') for comp in components[1:]: @@ -114,7 +113,7 @@ def _import(self, name): return mod def _get_object_name(self, line): - """ Get second token in line + """Get second token in line >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' @@ -129,7 +128,7 @@ def _get_object_name(self, line): return name.rstrip(':') def _uri2path(self, uri): - """ Convert uri to absolute filepath + """Convert uri to absolute filepath Parameters ---------- @@ -171,7 +170,7 @@ def _uri2path(self, uri): return path def _path2uri(self, dirpath): - """ Convert directory path to uri """ + """Convert directory path to uri""" package_dir = self.package_name.replace('.', os.path.sep) relpath = dirpath.replace(self.root_path, package_dir) if relpath.startswith(os.path.sep): @@ -179,7 +178,7 @@ def _path2uri(self, dirpath): return relpath.replace(os.path.sep, '.') def _parse_module(self, uri): - """ Parse module defined in *uri* """ + """Parse module defined in *uri*""" filename = self._uri2path(uri) if filename is None: print(filename, 'erk') @@ -233,7 +232,7 @@ def _parse_module_with_import(self, uri): return functions, classes def _parse_lines(self, linesource): - """ Parse lines of text for functions and classes """ + """Parse lines of text for functions and classes""" functions = [] classes = [] for line in linesource: @@ -293,16 +292,16 @@ def generate_api_doc(self, uri): head += '\n.. currentmodule:: ' + uri + '\n' body += '\n.. currentmodule:: ' + uri + '\n\n' for c in classes: - body += '\n:class:`' + c + '`\n' \ - + self.rst_section_levels[3] * \ - (len(c)+9) + '\n\n' + body += '\n:class:`' + c + '`\n' + self.rst_section_levels[3] * (len(c) + 9) + '\n\n' body += '\n.. 
autoclass:: ' + c + '\n' # must NOT exclude from index to keep cross-refs working - body += ' :members:\n' \ - ' :undoc-members:\n' \ - ' :show-inheritance:\n' \ - '\n' \ + body += ( + ' :members:\n' + ' :undoc-members:\n' + ' :show-inheritance:\n' + '\n' ' .. automethod:: __init__\n\n' + ) head += '.. autosummary::\n\n' for f in classes + functions: head += ' ' + f + '\n' @@ -317,7 +316,7 @@ def generate_api_doc(self, uri): return head, body def _survives_exclude(self, matchstr, match_type): - """ Returns True if *matchstr* does not match patterns + """Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present @@ -358,7 +357,7 @@ def _survives_exclude(self, matchstr, match_type): return True def discover_modules(self): - r""" Return module sequence discovered from ``self.package_name`` + r"""Return module sequence discovered from ``self.package_name`` Parameters @@ -385,22 +384,21 @@ def discover_modules(self): # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages - root_uri = self._path2uri(os.path.join(self.root_path, - dirpath)) + root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) # Normally, we'd only iterate over dirnames, but since # dipy does not import a whole bunch of modules we'll # include those here as well (the *.py filenames). - filenames = [f[:-3] for f in filenames if - f.endswith('.py') and not f.startswith('__init__')] + filenames = [ + f[:-3] for f in filenames if f.endswith('.py') and not f.startswith('__init__') + ] for filename in filenames: package_uri = '/'.join((dirpath, filename)) for subpkg_name in dirnames + filenames: package_uri = '.'.join((root_uri, subpkg_name)) package_path = self._uri2path(package_uri) - if (package_path and - self._survives_exclude(package_uri, 'package')): + if package_path and self._survives_exclude(package_uri, 'package'): modules.append(package_uri) return sorted(modules) @@ -408,10 +406,12 @@ def discover_modules(self): def write_modules_api(self, modules, outdir): # upper-level modules main_module = modules[0].split('.')[0] - ulms = ['.'.join(m.split('.')[:2]) if m.count('.') >= 1 - else m.split('.')[0] for m in modules] + ulms = [ + '.'.join(m.split('.')[:2]) if m.count('.') >= 1 else m.split('.')[0] for m in modules + ] from collections import OrderedDict + module_by_ulm = OrderedDict() for v, k in zip(modules, ulms): @@ -423,12 +423,12 @@ def write_modules_api(self, modules, outdir): written_modules = [] for ulm, mods in module_by_ulm.items(): - print(f"Generating docs for {ulm}:") + print(f'Generating docs for {ulm}:') document_head = [] document_body = [] for m in mods: - print(" -> " + m) + print(' -> ' + m) head, body = self.generate_api_doc(m) document_head.append(head) @@ -488,20 +488,19 @@ def write_index(self, outdir, froot='gen', relative_to=None): if self.written_modules is None: raise ValueError('No modules written') # Get full filename path - path = os.path.join(outdir, froot+self.rst_extension) + path = os.path.join(outdir, froot + self.rst_extension) # Path written into index is relative to rootpath if relative_to is not None: - relpath = ( - outdir + os.path.sep).replace(relative_to + os.path.sep, '') + relpath = (outdir + os.path.sep).replace(relative_to + os.path.sep, '') else: relpath = outdir idx = open(path, 'wt') w = idx.write w('.. 
AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') - title = "API Reference" - w(title + "\n") - w("=" * len(title) + "\n\n") + title = 'API Reference' + w(title + '\n') + w('=' * len(title) + '\n\n') w('.. toctree::\n\n') for f in self.written_modules: w(f' {os.path.join(relpath, f)}\n') diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py index 007175a262..2fded8fbfc 100755 --- a/doc/tools/build_modref_templates.py +++ b/doc/tools/build_modref_templates.py @@ -2,18 +2,19 @@ """Script to auto-generate our API docs. """ +import os +import re + # stdlib imports import sys -import re -import os + +# version comparison +from distutils.version import LooseVersion as V from os.path import join as pjoin # local imports from apigen import ApiDocWriter -# version comparison -from distutils.version import LooseVersion as V - # ***************************************************************************** @@ -38,7 +39,7 @@ def abort(error): try: __import__(package) except ImportError as e: - abort("Can not import " + package) + abort('Can not import ' + package) module = sys.modules[package] @@ -54,6 +55,7 @@ def abort(error): if os.path.exists(version_file): # Versioneer from runpy import run_path + try: source_version = run_path(version_file)['get_versions']()['version'] except (FileNotFoundError, KeyError): @@ -64,27 +66,30 @@ def abort(error): # Legacy fall-back info_file = pjoin('..', package, 'info.py') info_lines = open(info_file).readlines() - source_version = '.'.join([v.split('=')[1].strip(" '\n.") - for v in info_lines if re.match( - '^_version_(major|minor|micro|extra)', v - )]) + source_version = '.'.join( + [ + v.split('=')[1].strip(" '\n.") + for v in info_lines + if re.match('^_version_(major|minor|micro|extra)', v) + ] + ) print('***', source_version) if source_version != installed_version: - abort("Installed version does not match source version") - - docwriter = ApiDocWriter(package, rst_extension='.rst', - other_defines=other_defines) - docwriter.package_skip_patterns += [r'\.fixes$', - r'\.fixes.*$', - r'\.externals$', - r'\.externals.*$', - r'.*test.*$', - r'\.info.*$', - r'\.pkg_info.*$', - r'\.py3k.*$', - r'\._version.*$', - ] + abort('Installed version does not match source version') + + docwriter = ApiDocWriter(package, rst_extension='.rst', other_defines=other_defines) + docwriter.package_skip_patterns += [ + r'\.fixes$', + r'\.fixes.*$', + r'\.externals$', + r'\.externals.*$', + r'.*test.*$', + r'\.info.*$', + r'\.pkg_info.*$', + r'\.py3k.*$', + r'\._version.*$', + ] docwriter.write_api_docs(outdir) docwriter.write_index(outdir, 'index', relative_to=outdir) print('%d files written' % len(docwriter.written_modules)) diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py index 1649ba62da..53f4a32bdc 100644 --- a/nibabel/tests/test_viewers.py +++ b/nibabel/tests/test_viewers.py @@ -56,7 +56,7 @@ def test_viewer(): with pytest.raises( ( ValueError, # MPL3.5 and lower - KeyError, # MPL3.6 and higher + KeyError, # MPL3.6 and higher ) ): OrthoSlicer3D.cmap.fset(v, 'foo') # wrong cmap diff --git a/nisext/__init__.py b/nisext/__init__.py index 644503e3f7..b556c66d13 100644 --- a/nisext/__init__.py +++ b/nisext/__init__.py @@ -1,7 +1,6 @@ # init for sext package -""" Setuptools extensions +"""Setuptools extensions nibabel uses these routines, and houses them, and installs them. nipy-proper and dipy use them. 
""" - diff --git a/nisext/py3builder.py b/nisext/py3builder.py index 7bcaf2348c..24bd298364 100644 --- a/nisext/py3builder.py +++ b/nisext/py3builder.py @@ -1,4 +1,4 @@ -""" distutils utilities for porting to python 3 within 2-compatible tree """ +"""distutils utilities for porting to python 3 within 2-compatible tree""" try: @@ -6,20 +6,20 @@ except ImportError: # 2.x - no parsing of code from distutils.command.build_py import build_py -else: # Python 3 +else: # Python 3 # Command to also apply 2to3 to doctests from distutils import log + class build_py(build_py_2to3): def run_2to3(self, files): # Add doctest parsing; this stuff copied from distutils.utils in # python 3.2 source if not files: return - fixer_names, options, explicit = (self.fixer_names, - self.options, - self.explicit) + fixer_names, options, explicit = (self.fixer_names, self.options, self.explicit) # Make this class local, to delay import of 2to3 from lib2to3.refactor import RefactoringTool, get_fixers_from_package + class DistutilsRefactoringTool(RefactoringTool): def log_error(self, msg, *args, **kw): log.error(msg, *args) diff --git a/nisext/sexts.py b/nisext/sexts.py index 6ececdac78..b206588dec 100644 --- a/nisext/sexts.py +++ b/nisext/sexts.py @@ -1,19 +1,18 @@ -""" Distutils / setuptools helpers """ +"""Distutils / setuptools helpers""" import os -from os.path import join as pjoin, split as psplit, splitext - from configparser import ConfigParser - -from distutils.version import LooseVersion +from distutils import log from distutils.command.build_py import build_py from distutils.command.install_scripts import install_scripts - -from distutils import log +from distutils.version import LooseVersion +from os.path import join as pjoin +from os.path import split as psplit +from os.path import splitext def get_comrec_build(pkg_dir, build_cmd=build_py): - """ Return extended build command class for recording commit + """Return extended build command class for recording commit The extended command tries to run git to find the current commit, getting the empty string if it fails. It then writes the commit hash into a file @@ -47,15 +46,20 @@ def get_comrec_build(pkg_dir, build_cmd=build_py): information at the terminal. See the ``pkg_info.py`` module in the nipy package for an example. 
""" + class MyBuildPy(build_cmd): - """ Subclass to write commit data into installation tree """ + """Subclass to write commit data into installation tree""" + def run(self): build_cmd.run(self) import subprocess - proc = subprocess.Popen('git rev-parse --short HEAD', - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) + + proc = subprocess.Popen( + 'git rev-parse --short HEAD', + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) repo_commit, _ = proc.communicate() # Fix for python 3 repo_commit = str(repo_commit) @@ -65,11 +69,12 @@ def run(self): cfg_parser.set('commit hash', 'install_hash', repo_commit) out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt') cfg_parser.write(open(out_pth, 'wt')) + return MyBuildPy def _add_append_key(in_dict, key, value): - """ Helper for appending dependencies to setuptools args """ + """Helper for appending dependencies to setuptools args""" # If in_dict[key] does not exist, create it # If in_dict[key] is a string, make it len 1 list of strings # Append value to in_dict[key] list @@ -81,14 +86,16 @@ def _add_append_key(in_dict, key, value): # Dependency checks -def package_check(pkg_name, version=None, - optional=False, - checker=LooseVersion, - version_getter=None, - messages=None, - setuptools_args=None - ): - """ Check if package `pkg_name` is present and has good enough version +def package_check( + pkg_name, + version=None, + optional=False, + checker=LooseVersion, + version_getter=None, + messages=None, + setuptools_args=None, +): + """Check if package `pkg_name` is present and has good enough version Has two modes of operation. If `setuptools_args` is None (the default), raise an error for missing non-optional dependencies and log warnings for @@ -134,42 +141,35 @@ def package_check(pkg_name, version=None, setuptools_mode = not setuptools_args is None optional_tf = bool(optional) if version_getter is None: + def version_getter(pkg_name): mod = __import__(pkg_name) return mod.__version__ + if messages is None: messages = {} msgs = { - 'missing': 'Cannot import package "%s" - is it installed?', - 'missing opt': 'Missing optional package "%s"', - 'opt suffix': '; you may get run-time errors', - 'version too old': 'You have version %s of package "%s"' - ' but we need version >= %s', } + 'missing': 'Cannot import package "%s" - is it installed?', + 'missing opt': 'Missing optional package "%s"', + 'opt suffix': '; you may get run-time errors', + 'version too old': 'You have version %s of package "%s" but we need version >= %s', + } msgs.update(messages) - status, have_version = _package_status(pkg_name, - version, - version_getter, - checker) + status, have_version = _package_status(pkg_name, version, version_getter, checker) if status == 'satisfied': return if not setuptools_mode: if status == 'missing': if not optional_tf: raise RuntimeError(msgs['missing'] % pkg_name) - log.warn(msgs['missing opt'] % pkg_name + - msgs['opt suffix']) + log.warn(msgs['missing opt'] % pkg_name + msgs['opt suffix']) return elif status == 'no-version': raise RuntimeError(f'Cannot find version for {pkg_name}') assert status == 'low-version' if not optional_tf: - raise RuntimeError(msgs['version too old'] % (have_version, - pkg_name, - version)) - log.warn(msgs['version too old'] % (have_version, - pkg_name, - version) - + msgs['opt suffix']) + raise RuntimeError(msgs['version too old'] % (have_version, pkg_name, version)) + log.warn(msgs['version too old'] % (have_version, pkg_name, version) + msgs['opt suffix']) return # setuptools 
mode if optional_tf and not isinstance(optional, str): @@ -180,9 +180,7 @@ def version_getter(pkg_name): if optional_tf: if not 'extras_require' in setuptools_args: setuptools_args['extras_require'] = {} - _add_append_key(setuptools_args['extras_require'], - optional, - dependency) + _add_append_key(setuptools_args['extras_require'], optional, dependency) else: _add_append_key(setuptools_args, 'install_requires', dependency) @@ -203,8 +201,7 @@ def _package_status(pkg_name, version, version_getter, checker): return 'satisfied', have_version -BAT_TEMPLATE = \ -r"""@echo off +BAT_TEMPLATE = r"""@echo off REM wrapper to use shebang first line of {FNAME} set mypath=%~dp0 set pyscript="%mypath%{FNAME}" @@ -217,8 +214,9 @@ def _package_status(pkg_name, version, version_getter, checker): call "%py_exe%" %pyscript% %* """ + class install_scripts_bat(install_scripts): - """ Make scripts executable on Windows + """Make scripts executable on Windows Scripts are bare file names without extension on Unix, fitting (for example) Debian rules. They identify as python scripts with the usual ``#!`` first @@ -234,25 +232,24 @@ class install_scripts_bat(install_scripts): example at git://github.com/matthew-brett/myscripter.git for more background. """ + def run(self): install_scripts.run(self) - if not os.name == "nt": + if not os.name == 'nt': return for filepath in self.get_outputs(): # If we can find an executable name in the #! top line of the script # file, make .bat wrapper for script. with open(filepath, 'rt') as fobj: first_line = fobj.readline() - if not (first_line.startswith('#!') and - 'python' in first_line.lower()): - log.info("No #!python executable found, skipping .bat " - "wrapper") + if not (first_line.startswith('#!') and 'python' in first_line.lower()): + log.info('No #!python executable found, skipping .bat wrapper') continue pth, fname = psplit(filepath) froot, ext = splitext(fname) bat_file = pjoin(pth, froot + '.bat') bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname) - log.info(f"Making {bat_file} wrapper for {filepath}") + log.info(f'Making {bat_file} wrapper for {filepath}') if self.dry_run: continue with open(bat_file, 'wt') as fobj: @@ -268,7 +265,7 @@ def __init__(self, vars): def read_vars_from(ver_file): - """ Read variables from Python text file + """Read variables from Python text file Parameters ---------- diff --git a/nisext/testers.py b/nisext/testers.py index 05b2d92a3e..07f71af696 100644 --- a/nisext/testers.py +++ b/nisext/testers.py @@ -1,4 +1,4 @@ -""" Test package information in various install settings +"""Test package information in various install settings The routines here install the package from source directories, zips or eggs, and check these installations by running tests, checking version information, @@ -26,28 +26,29 @@ # Run tests from binary egg bdist-egg-tests: $(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nibabel")' - """ import os -import sys -from os.path import join as pjoin, abspath -from glob import glob +import re import shutil +import sys import tempfile import zipfile -import re -from subprocess import Popen, PIPE +from glob import glob +from os.path import abspath +from os.path import join as pjoin +from subprocess import PIPE, Popen NEEDS_SHELL = os.name != 'nt' -PYTHON=sys.executable +PYTHON = sys.executable HAVE_PUTENV = hasattr(os, 'putenv') PY_LIB_SDIR = 'pylib' + def back_tick(cmd, ret_err=False, as_str=True): - """ Run command `cmd`, return stdout, or stdout, stderr if `ret_err` + """Run command `cmd`, 
return stdout, or stdout, stderr if `ret_err` Roughly equivalent to ``check_output`` in Python 2.7 @@ -94,7 +95,7 @@ def back_tick(cmd, ret_err=False, as_str=True): def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): - """ Run command in own process in anonymous path + """Run command in own process in anonymous path Parameters ---------- @@ -127,15 +128,16 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): # (via `cmd`). Consider that PYTHONPATH may not be set. Because the # command might run scripts via the shell, prepend script_dir to the # system path also. - paths_add = \ -r""" + paths_add = r""" os.environ['PATH'] = r'"{script_dir}"' + os.path.pathsep + os.environ['PATH'] PYTHONPATH = os.environ.get('PYTHONPATH') if PYTHONPATH is None: os.environ['PYTHONPATH'] = r'"{pkg_path}"' else: os.environ['PYTHONPATH'] = r'"{pkg_path}"' + os.path.pathsep + PYTHONPATH -""".format(**locals()) +""".format( + **locals() + ) if print_location: p_loc = f'print({mod_name}.__file__);' else: @@ -146,14 +148,17 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): os.chdir(tmpdir) with open('script.py', 'wt') as fobj: fobj.write( -r""" + r""" import os import sys sys.path.insert(0, r"{pkg_path}") {paths_add} import {mod_name} {p_loc} -{cmd}""".format(**locals())) +{cmd}""".format( + **locals() + ) + ) res = back_tick(f'{PYTHON} script.py', ret_err=True) finally: os.chdir(cwd) @@ -162,7 +167,7 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): def zip_extract_all(fname, path=None): - """ Extract all members from zipfile + """Extract all members from zipfile Deals with situation where the directory is stored in the zipfile as a name, as well as files that have to go into this directory. 
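The ``back_tick`` and ``run_mod_cmd`` helpers above are easiest to understand from the way the test suite exercises them. A minimal usage sketch, with the signatures as in the diff above::

    import os
    from os.path import dirname

    from nisext.testers import PYTHON, back_tick, run_mod_cmd

    # back_tick runs a shell command, returning decoded stdout, or a
    # (stdout, stderr) pair when ret_err=True
    cmd = f'{PYTHON} -c "print(\'Hello\')"'
    assert back_tick(cmd) == 'Hello'
    assert back_tick(cmd, ret_err=True) == ('Hello', '')

    # run_mod_cmd imports module 'os' from its own directory in a fresh
    # process started in an anonymous temporary path, then runs the command
    sout, serr = run_mod_cmd('os', dirname(os.__file__), "print('Hello')", None, False)
    assert (sout, serr) == ('Hello', '')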
@@ -176,7 +181,7 @@ def zip_extract_all(fname, path=None): def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'): - """ Install package in `from_dir` to standard location in `to_dir` + """Install package in `from_dir` to standard location in `to_dir` Parameters ---------- @@ -191,8 +196,7 @@ def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'): subdirectory within `to_dir` to which scripts will be installed """ site_pkgs_path = os.path.join(to_dir, py_lib_sdir) - py_lib_locs = (f' --install-purelib={site_pkgs_path} ' - f'--install-platlib={site_pkgs_path}') + py_lib_locs = f' --install-purelib={site_pkgs_path} ' f'--install-platlib={site_pkgs_path}' pwd = os.path.abspath(os.getcwd()) cmd = f'{PYTHON} setup.py --quiet install --prefix={to_dir} {py_lib_locs}' try: @@ -202,10 +206,10 @@ def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'): os.chdir(pwd) -def install_from_zip(zip_fname, install_path, pkg_finder=None, - py_lib_sdir=PY_LIB_SDIR, - script_sdir='bin'): - """ Install package from zip file `zip_fname` +def install_from_zip( + zip_fname, install_path, pkg_finder=None, py_lib_sdir=PY_LIB_SDIR, script_sdir='bin' +): + """Install package from zip file `zip_fname` Parameters ---------- @@ -240,7 +244,7 @@ def install_from_zip(zip_fname, install_path, pkg_finder=None, def contexts_print_info(mod_name, repo_path, install_path): - """ Print result of get_info from different installation routes + """Print result of get_info from different installation routes Runs installation from: @@ -280,7 +284,7 @@ def contexts_print_info(mod_name, repo_path, install_path): def info_from_here(mod_name): - """ Run info context checks starting in working directory + """Run info context checks starting in working directory Runs checks from current working directory, installing temporary installations into a new temporary directory @@ -299,7 +303,7 @@ def info_from_here(mod_name): def tests_installed(mod_name, source_path=None): - """ Install from `source_path` into temporary directory; run tests + """Install from `source_path` into temporary directory; run tests Parameters ---------- @@ -315,21 +319,19 @@ def tests_installed(mod_name, source_path=None): scripts_path = pjoin(install_path, 'bin') try: install_from_to(source_path, install_path, PY_LIB_SDIR, 'bin') - stdout, stderr = run_mod_cmd(mod_name, - site_pkgs_path, - mod_name + '.test()', - scripts_path) + stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, mod_name + '.test()', scripts_path) finally: shutil.rmtree(install_path) print(stdout) print(stderr) + # Tell nose this is not a test tests_installed.__test__ = False def check_installed_files(repo_mod_path, install_mod_path): - """ Check files in `repo_mod_path` are installed at `install_mod_path` + """Check files in `repo_mod_path` are installed at `install_mod_path` At the moment, all this does is check that all the ``*.py`` files in `repo_mod_path` are installed at `install_mod_path`. 
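The check described here is done by the ``missing_from`` helper that follows. A rough standalone sketch of the same idea, restricted to ``*.py`` files, with ``missing_pys`` as a name invented here::

    import os
    import re

    def missing_pys(repo_mod_path, install_mod_path):
        # Walk the repository tree and report every *.py file with no
        # counterpart at the same relative path in the installed tree
        missing = []
        for dirpath, _, filenames in os.walk(repo_mod_path):
            rel = os.path.relpath(dirpath, repo_mod_path)
            for fname in filenames:
                if re.search(r'\.py$', fname) and not os.path.isfile(
                    os.path.join(install_mod_path, rel, fname)
                ):
                    missing.append(os.path.join(rel, fname))
        return missing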
@@ -348,11 +350,11 @@ def check_installed_files(repo_mod_path, install_mod_path): list of files that should have been installed, but have not been installed """ - return missing_from(repo_mod_path, install_mod_path, filter=r"\.py$") + return missing_from(repo_mod_path, install_mod_path, filter=r'\.py$') def missing_from(path0, path1, filter=None): - """ Return filenames present in `path0` but not in `path1` + """Return filenames present in `path0` but not in `path1` Parameters ---------- @@ -386,8 +388,7 @@ def missing_from(path0, path1, filter=None): def check_files(mod_name, repo_path=None, scripts_sdir='bin'): - """ Print library and script files not picked up during install - """ + """Print library and script files not picked up during install""" if repo_path is None: repo_path = abspath(os.getcwd()) install_path = tempfile.mkdtemp() @@ -396,67 +397,60 @@ def check_files(mod_name, repo_path=None, scripts_sdir='bin'): repo_bin = pjoin(repo_path, 'bin') installed_bin = pjoin(install_path, 'bin') try: - zip_fname = make_dist(repo_path, - install_path, - 'sdist --formats=zip', - '*.zip') + zip_fname = make_dist(repo_path, install_path, 'sdist --formats=zip', '*.zip') pf = get_sdist_finder(mod_name) install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, scripts_sdir) - lib_misses = missing_from(repo_mod_path, installed_mod_path, r"\.py$") + lib_misses = missing_from(repo_mod_path, installed_mod_path, r'\.py$') script_misses = missing_from(repo_bin, installed_bin) finally: shutil.rmtree(install_path) if lib_misses: - print("Missed library files: ", ', '.join(lib_misses)) + print('Missed library files: ', ', '.join(lib_misses)) else: - print("You got all the library files") + print('You got all the library files') if script_misses: - print("Missed script files: ", ', '.join(script_misses)) + print('Missed script files: ', ', '.join(script_misses)) else: - print("You got all the script files") + print('You got all the script files') return len(lib_misses) > 0 or len(script_misses) > 0 def get_sdist_finder(mod_name): - """ Return function finding sdist source directory for `mod_name` - """ + """Return function finding sdist source directory for `mod_name`""" + def pf(pth): pkg_dirs = glob(pjoin(pth, mod_name + '-*')) if len(pkg_dirs) != 1: raise OSError('There must be one and only one package dir') return pkg_dirs[0] + return pf def sdist_tests(mod_name, repo_path=None, label='fast', doctests=True): - """ Make sdist zip, install from it, and run tests """ + """Make sdist zip, install from it, and run tests""" if repo_path is None: repo_path = abspath(os.getcwd()) install_path = tempfile.mkdtemp() try: - zip_fname = make_dist(repo_path, - install_path, - 'sdist --formats=zip', - '*.zip') + zip_fname = make_dist(repo_path, install_path, 'sdist --formats=zip', '*.zip') pf = get_sdist_finder(mod_name) install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, 'bin') site_pkgs_path = pjoin(install_path, PY_LIB_SDIR) script_path = pjoin(install_path, 'bin') cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" - stdout, stderr = run_mod_cmd(mod_name, - site_pkgs_path, - cmd, - script_path) + stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, cmd, script_path) finally: shutil.rmtree(install_path) print(stdout) print(stderr) + sdist_tests.__test__ = False def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True): - """ Make bdist_egg, unzip it, and run tests from result + """Make bdist_egg, unzip it, and run tests from result We've got a problem here, because the egg 
does not contain the scripts, and so, if we are testing the scripts with ``mod.test()``, we won't pick up the @@ -472,26 +466,21 @@ def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True): install_path = tempfile.mkdtemp() scripts_path = pjoin(install_path, 'bin') try: - zip_fname = make_dist(repo_path, - install_path, - 'bdist_egg', - '*.egg') + zip_fname = make_dist(repo_path, install_path, 'bdist_egg', '*.egg') zip_extract_all(zip_fname, install_path) cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" - stdout, stderr = run_mod_cmd(mod_name, - install_path, - cmd, - scripts_path) + stdout, stderr = run_mod_cmd(mod_name, install_path, cmd, scripts_path) finally: shutil.rmtree(install_path) print(stdout) print(stderr) + bdist_egg_tests.__test__ = False def make_dist(repo_path, out_dir, setup_params, zipglob): - """ Create distutils distribution file + """Create distutils distribution file Parameters ---------- @@ -525,8 +514,10 @@ def make_dist(repo_path, out_dir, setup_params, zipglob): back_tick(f'{PYTHON} setup.py {setup_params} --dist-dir={out_dir}') zips = glob(pjoin(out_dir, zipglob)) if len(zips) != 1: - raise OSError(f"There must be one and only one {zipglob} " - f"file, but I found \"{': '.join(zips)}\"") + raise OSError( + f'There must be one and only one {zipglob} ' + f"file, but I found \"{': '.join(zips)}\"" + ) finally: os.chdir(pwd) return zips[0] diff --git a/nisext/tests/test_sexts.py b/nisext/tests/test_sexts.py index 22d6ce7a9b..f262ec5685 100644 --- a/nisext/tests/test_sexts.py +++ b/nisext/tests/test_sexts.py @@ -1,13 +1,13 @@ -""" Tests for nisexts.sexts module +"""Tests for nisexts.sexts module """ import sys import types -from ..sexts import package_check - import pytest +from ..sexts import package_check + FAKE_NAME = 'nisext_improbable' assert FAKE_NAME not in sys.modules FAKE_MODULE = types.ModuleType('nisext_fake') @@ -44,10 +44,12 @@ def test_package_check_setuptools(): # If setuptools arg not None, missing package just adds it to arg with pytest.raises(RuntimeError): package_check(FAKE_NAME, setuptools_args=None) + def pkg_chk_sta(*args, **kwargs): st_args = {} package_check(*args, setuptools_args=st_args, **kwargs) return st_args + assert pkg_chk_sta(FAKE_NAME) == {'install_requires': ['nisext_improbable']} # Check that this gets appended to existing value old_sta = {'install_requires': ['something']} @@ -58,7 +60,9 @@ def pkg_chk_sta(*args, **kwargs): package_check(FAKE_NAME, setuptools_args=old_sta) assert old_sta == {'install_requires': ['something', 'nisext_improbable']} # Optional, add to extras_require - assert pkg_chk_sta(FAKE_NAME, optional='something') == {'extras_require': {'something': ['nisext_improbable']}} + assert pkg_chk_sta(FAKE_NAME, optional='something') == { + 'extras_require': {'something': ['nisext_improbable']} + } # Check that this gets appended to existing value old_sta = {'extras_require': {'something': ['amodule']}} package_check(FAKE_NAME, optional='something', setuptools_args=old_sta) @@ -66,8 +70,7 @@ def pkg_chk_sta(*args, **kwargs): # That string gets converted to a list here too old_sta = {'extras_require': {'something': 'amodule'}} package_check(FAKE_NAME, optional='something', setuptools_args=old_sta) - assert old_sta == {'extras_require': - {'something': ['amodule', 'nisext_improbable']}} + assert old_sta == {'extras_require': {'something': ['amodule', 'nisext_improbable']}} # But optional has to be a string if not empty and setuptools_args defined with pytest.raises(RuntimeError): 
package_check(FAKE_NAME, optional=True, setuptools_args={}) @@ -84,21 +87,20 @@ def pkg_chk_sta(*args, **kwargs): assert pkg_chk_sta(FAKE_NAME, version='0.3') == {'install_requires': exp_spec} # Unless optional in which case goes into extras_require package_check(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') - assert pkg_chk_sta(FAKE_NAME, version='0.3', optional='afeature') == {'extras_require': {'afeature': exp_spec}} + assert pkg_chk_sta(FAKE_NAME, version='0.3', optional='afeature') == { + 'extras_require': {'afeature': exp_spec} + } # Might do custom version check - assert pkg_chk_sta(FAKE_NAME, - version='0.2', - version_getter=lambda x: '0.2') == {} + assert pkg_chk_sta(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') == {} # If the version check fails, put into requires bad_getter = lambda x: x.not_an_attribute exp_spec = [FAKE_NAME + '>=0.2'] - assert pkg_chk_sta(FAKE_NAME, - version='0.2', - version_getter=bad_getter) == {'install_requires': exp_spec} + assert pkg_chk_sta(FAKE_NAME, version='0.2', version_getter=bad_getter) == { + 'install_requires': exp_spec + } # Likewise for optional dependency - assert pkg_chk_sta(FAKE_NAME, - version='0.2', - optional='afeature', - version_getter=bad_getter) == {'extras_require': {'afeature': [FAKE_NAME + '>=0.2']}} + assert pkg_chk_sta( + FAKE_NAME, version='0.2', optional='afeature', version_getter=bad_getter + ) == {'extras_require': {'afeature': [FAKE_NAME + '>=0.2']}} finally: del sys.modules[FAKE_NAME] diff --git a/nisext/tests/test_testers.py b/nisext/tests/test_testers.py index c2e9e021f4..f81a40f1df 100644 --- a/nisext/tests/test_testers.py +++ b/nisext/tests/test_testers.py @@ -1,19 +1,19 @@ -""" Tests for testers +"""Tests for testers """ import os from os.path import dirname, pathsep -from ..testers import back_tick, run_mod_cmd, PYTHON - import pytest +from ..testers import PYTHON, back_tick, run_mod_cmd + def test_back_tick(): cmd = f'{PYTHON} -c "print(\'Hello\')"' - assert back_tick(cmd) == "Hello" - assert back_tick(cmd, ret_err=True) == ("Hello", "") - assert back_tick(cmd, True, False) == (b"Hello", b"") + assert back_tick(cmd) == 'Hello' + assert back_tick(cmd, ret_err=True) == ('Hello', '') + assert back_tick(cmd, True, False) == (b'Hello', b'') cmd = f'{PYTHON} -c "raise ValueError()"' with pytest.raises(RuntimeError): back_tick(cmd) @@ -22,7 +22,7 @@ def test_back_tick(): def test_run_mod_cmd(): mod = 'os' mod_dir = dirname(os.__file__) - assert run_mod_cmd(mod, mod_dir, "print('Hello')", None, False) == ("Hello", "") + assert run_mod_cmd(mod, mod_dir, "print('Hello')", None, False) == ('Hello', '') sout, serr = run_mod_cmd(mod, mod_dir, "print('Hello again')") assert serr == '' mod_file, out_str = [s.strip() for s in sout.split('\n')] diff --git a/setup.py b/setup.py index 4b9bde35b0..2a2f8d8e21 100755 --- a/setup.py +++ b/setup.py @@ -21,10 +21,10 @@ pip install --upgrade build python -m build - """ from setuptools import setup + import versioneer setup( diff --git a/tools/bisect_nose.py b/tools/bisect_nose.py index 3f9092564b..7036e0b9cc 100755 --- a/tools/bisect_nose.py +++ b/tools/bisect_nose.py @@ -1,9 +1,8 @@ #!/usr/bin/env python -""" Utility for git-bisecting nose failures +"""Utility for git-bisecting nose failures """ DESCRIP = 'Check nose output for given text, set sys exit for git bisect' -EPILOG = \ -""" +EPILOG = """ Imagine you've just detected a nose test failure. The failure is in a particular test or test module - here 'test_analyze.py'. 
The failure *is* in git branch ``main-master`` but it *is not* in tag ``v1.6.1``. Then you can @@ -37,14 +36,13 @@ on the python path. """ import os -import sys +import re import shutil +import sys import tempfile -import re -from functools import partial -from subprocess import check_call, Popen, PIPE, CalledProcessError - from argparse import ArgumentParser, RawDescriptionHelpFormatter +from functools import partial +from subprocess import PIPE, CalledProcessError, Popen, check_call caller = partial(check_call, shell=True) popener = partial(Popen, stdout=PIPE, stderr=PIPE, shell=True) @@ -63,31 +61,27 @@ def call_or_untestable(cmd): def main(): - parser = ArgumentParser(description=DESCRIP, - epilog=EPILOG, - formatter_class=RawDescriptionHelpFormatter) - parser.add_argument('test_path', type=str, - help='Path to test') - parser.add_argument('--error-txt', type=str, - help='regular expression for error of interest') - parser.add_argument('--clean', action='store_true', - help='Clean git tree before running tests') - parser.add_argument('--build', action='store_true', - help='Build git tree before running tests') + parser = ArgumentParser( + description=DESCRIP, epilog=EPILOG, formatter_class=RawDescriptionHelpFormatter + ) + parser.add_argument('test_path', type=str, help='Path to test') + parser.add_argument('--error-txt', type=str, help='regular expression for error of interest') + parser.add_argument('--clean', action='store_true', help='Clean git tree before running tests') + parser.add_argument('--build', action='store_true', help='Build git tree before running tests') # parse the command line args = parser.parse_args() path = os.path.abspath(args.test_path) if args.clean: - print("Cleaning") + print('Cleaning') call_or_untestable('git clean -fxd') if args.build: - print("Building") + print('Building') call_or_untestable('python setup.py build_ext -i') cwd = os.getcwd() tmpdir = tempfile.mkdtemp() try: os.chdir(tmpdir) - print("Testing") + print('Testing') proc = popener('nosetests ' + path) stdout, stderr = proc.communicate() finally: diff --git a/tools/ci/activate.sh b/tools/ci/activate.sh index ebef3b650b..567e13a67b 100644 --- a/tools/ci/activate.sh +++ b/tools/ci/activate.sh @@ -7,4 +7,3 @@ else ls -R virtenv false fi - diff --git a/tools/dicomfs.wsgi b/tools/dicomfs.wsgi index f8b1505357..bd2480a647 100644 --- a/tools/dicomfs.wsgi +++ b/tools/dicomfs.wsgi @@ -8,11 +8,11 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Copyright (C) 2011 Christian Haselgrove +import cgi import sys import traceback -from functools import partial import urllib -import cgi +from functools import partial import jinja2 @@ -24,7 +24,7 @@ BASE_DIR = None # default setting for whether to follow symlinks in BASE_DIR. 
Python 2.5 only # accepts False for this setting, Python >= 2.6 accepts True or False -FOLLOWLINKS=False +FOLLOWLINKS = False # Define routine to get studies studies_getter = partial(dft.get_studies, followlinks=FOLLOWLINKS) @@ -118,13 +118,14 @@ Study comments: {{ study.comments }} """ -class HandlerError: +class HandlerError: def __init__(self, status, output): self.status = status self.output = output return + def application(environ, start_response): try: (status, c_type, output) = handler(environ) @@ -138,13 +139,13 @@ def application(environ, start_response): status = '500 Internal Server Error' output = ''.join(lines) c_type = 'text/plain' - response_headers = [('Content-Type', c_type), - ('Content-Length', str(len(output)))] + response_headers = [('Content-Type', c_type), ('Content-Length', str(len(output)))] if c_type == 'image/nifti': response_headers.append(('Content-Disposition', 'attachment; filename=image.nii')) start_response(status, response_headers) return [output] + def handler(environ): if environ['PATH_INFO'] == '' or environ['PATH_INFO'] == '/': return ('200 OK', 'text/html', index(environ)) @@ -158,7 +159,8 @@ def handler(environ): return ('200 OK', 'image/nifti', nifti(parts[0], parts[1], parts[2])) elif parts[3] == 'png': return ('200 OK', 'image/png', png(parts[0], parts[1], parts[2])) - raise HandlerError('404 Not Found', "%s not found\n" % environ['PATH_INFO']) + raise HandlerError('404 Not Found', '%s not found\n' % environ['PATH_INFO']) + def study_cmp(a, b): if a.date < b.date: @@ -171,6 +173,7 @@ def study_cmp(a, b): return 1 return 0 + def index(environ): patients = {} for s in studies_getter(BASE_DIR): @@ -178,14 +181,16 @@ def index(environ): template = template_env.from_string(index_template) return template.render(patients=patients).encode('utf-8') + def patient(patient): - studies = [ s for s in studies_getter() if s.patient_name_or_uid() == patient ] + studies = [s for s in studies_getter() if s.patient_name_or_uid() == patient] if len(studies) == 0: raise HandlerError('404 Not Found', 'patient %s not found\n' % patient) studies.sort(study_cmp) template = template_env.from_string(patient_template) return template.render(studies=studies).encode('utf-8') + def patient_date_time(patient, date_time): study = None for s in studies_getter(): @@ -200,6 +205,7 @@ def patient_date_time(patient, date_time): template = template_env.from_string(patient_date_time_template) return template.render(study=study).encode('utf-8') + def nifti(patient, date_time, scan): study = None for s in studies_getter(): @@ -221,6 +227,7 @@ def nifti(patient, date_time, scan): raise HandlerError('404 Not Found', 'series not found') return ser.as_nifti() + def png(patient, date_time, scan): study = None for s in studies_getter(): @@ -243,8 +250,10 @@ def png(patient, date_time, scan): index = len(ser.storage_instances) / 2 return ser.as_png(index, True) + if __name__ == '__main__': import wsgiref.simple_server + httpd = wsgiref.simple_server.make_server('', 8080, application) httpd.serve_forever() diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py index 156976daf5..cabff5c0af 100755 --- a/tools/gitwash_dumper.py +++ b/tools/gitwash_dumper.py @@ -1,16 +1,16 @@ #!/usr/bin/env python -""" Checkout gitwash repo into directory and do search replace on name """ +"""Checkout gitwash repo into directory and do search replace on name""" +import fnmatch +import glob import os -from os.path import join as pjoin +import re import shutil import sys -import re -import glob -import 
fnmatch import tempfile -from subprocess import call from optparse import OptionParser +from os.path import join as pjoin +from subprocess import call verbose = False @@ -51,9 +51,7 @@ def cp_files(in_path, globs, out_path): def filename_search_replace(sr_pairs, filename, backup=False): - """ Search and replace for expressions in files - - """ + """Search and replace for expressions in files""" in_txt = open(filename, 'rt').read(-1) out_txt = in_txt[:] for in_exp, out_exp in sr_pairs: @@ -67,12 +65,9 @@ def filename_search_replace(sr_pairs, filename, backup=False): return True -def copy_replace(replace_pairs, - repo_path, - out_path, - cp_globs=('*',), - rep_globs=('*',), - renames = ()): +def copy_replace( + replace_pairs, repo_path, out_path, cp_globs=('*',), rep_globs=('*',), renames=() +): out_fnames = cp_files(repo_path, cp_globs, out_path) renames = [(re.compile(in_exp), out_exp) for in_exp, out_exp in renames] fnames = [] @@ -89,14 +84,10 @@ def copy_replace(replace_pairs, break -def make_link_targets(proj_name, - user_name, - repo_name, - known_link_fname, - out_link_fname, - url=None, - ml_url=None): - """ Check and make link targets +def make_link_targets( + proj_name, user_name, repo_name, known_link_fname, out_link_fname, url=None, ml_url=None +): + """Check and make link targets If url is None or ml_url is None, check if there are links present for these in `known_link_fname`. If not, raise error. The check is: @@ -132,8 +123,7 @@ def make_link_targets(proj_name, if match: have_gh_url = True if not have_url or not have_ml_url: - raise RuntimeError('Need command line or known project ' - 'and / or mailing list URLs') + raise RuntimeError('Need command line or known project and / or mailing list URLs') lines = [] if not url is None: lines.append(f'.. _{proj_name}: {url}\n') @@ -168,32 +158,50 @@ def make_link_targets(proj_name, def main(): parser = OptionParser() parser.set_usage(parser.get_usage().strip() + USAGE) - parser.add_option("--repo-name", dest="repo_name", - help="repository name - e.g. nitime", - metavar="REPO_NAME") - parser.add_option("--github-user", dest="main_gh_user", - help="github username for main repo - e.g fperez", - metavar="MAIN_GH_USER") - parser.add_option("--gitwash-url", dest="gitwash_url", - help=f"URL to gitwash repository - default {GITWASH_CENTRAL}", - default=GITWASH_CENTRAL, - metavar="GITWASH_URL") - parser.add_option("--gitwash-branch", dest="gitwash_branch", - help=f"branch in gitwash repository - default {GITWASH_BRANCH}", - default=GITWASH_BRANCH, - metavar="GITWASH_BRANCH") - parser.add_option("--source-suffix", dest="source_suffix", - help="suffix of ReST source files - default '.rst'", - default='.rst', - metavar="SOURCE_SUFFIX") - parser.add_option("--project-url", dest="project_url", - help="URL for project web pages", - default=None, - metavar="PROJECT_URL") - parser.add_option("--project-ml-url", dest="project_ml_url", - help="URL for project mailing list", - default=None, - metavar="PROJECT_ML_URL") + parser.add_option( + '--repo-name', dest='repo_name', help='repository name - e.g. 
nitime', metavar='REPO_NAME' + ) + parser.add_option( + '--github-user', + dest='main_gh_user', + help='github username for main repo - e.g fperez', + metavar='MAIN_GH_USER', + ) + parser.add_option( + '--gitwash-url', + dest='gitwash_url', + help=f'URL to gitwash repository - default {GITWASH_CENTRAL}', + default=GITWASH_CENTRAL, + metavar='GITWASH_URL', + ) + parser.add_option( + '--gitwash-branch', + dest='gitwash_branch', + help=f'branch in gitwash repository - default {GITWASH_BRANCH}', + default=GITWASH_BRANCH, + metavar='GITWASH_BRANCH', + ) + parser.add_option( + '--source-suffix', + dest='source_suffix', + help="suffix of ReST source files - default '.rst'", + default='.rst', + metavar='SOURCE_SUFFIX', + ) + parser.add_option( + '--project-url', + dest='project_url', + help='URL for project web pages', + default=None, + metavar='PROJECT_URL', + ) + parser.add_option( + '--project-ml-url', + dest='project_ml_url', + help='URL for project mailing list', + default=None, + metavar='PROJECT_ML_URL', + ) (options, args) = parser.parse_args() if len(args) < 2: parser.print_help() @@ -205,21 +213,27 @@ def main(): options.main_gh_user = options.repo_name repo_path = clone_repo(options.gitwash_url, options.gitwash_branch) try: - copy_replace((('PROJECTNAME', project_name), - ('REPONAME', options.repo_name), - ('MAIN_GH_USER', options.main_gh_user)), - repo_path, - out_path, - cp_globs=(pjoin('gitwash', '*'),), - rep_globs=('*.rst',), - renames=(('\.rst$', options.source_suffix),)) - make_link_targets(project_name, - options.main_gh_user, - options.repo_name, - pjoin(out_path, 'gitwash', 'known_projects.inc'), - pjoin(out_path, 'gitwash', 'this_project.inc'), - options.project_url, - options.project_ml_url) + copy_replace( + ( + ('PROJECTNAME', project_name), + ('REPONAME', options.repo_name), + ('MAIN_GH_USER', options.main_gh_user), + ), + repo_path, + out_path, + cp_globs=(pjoin('gitwash', '*'),), + rep_globs=('*.rst',), + renames=(('\.rst$', options.source_suffix),), + ) + make_link_targets( + project_name, + options.main_gh_user, + options.repo_name, + pjoin(out_path, 'gitwash', 'known_projects.inc'), + pjoin(out_path, 'gitwash', 'this_project.inc'), + options.project_url, + options.project_ml_url, + ) finally: shutil.rmtree(repo_path) diff --git a/tools/make_tarball.py b/tools/make_tarball.py index afbde3d48d..3cdad40d0b 100755 --- a/tools/make_tarball.py +++ b/tools/make_tarball.py @@ -2,10 +2,10 @@ """Simple script to create a tarball with proper git info. 
""" -import commands import os -from toollib import * +import commands +from toollib import * tag = commands.getoutput('git describe') base_name = f'nibabel-{tag}' diff --git a/tools/mpkg_wrapper.py b/tools/mpkg_wrapper.py index d79f84caad..0a96156e4d 100644 --- a/tools/mpkg_wrapper.py +++ b/tools/mpkg_wrapper.py @@ -17,6 +17,7 @@ import sys + def main(): del sys.argv[0] sys.argv.insert(1, 'bdist_mpkg') @@ -25,5 +26,6 @@ def main(): g['__name__'] = '__main__' execfile(sys.argv[0], g, g) + if __name__ == '__main__': main() diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py index 86897fca8e..06b2dbf828 100755 --- a/tools/prep_zenodo.py +++ b/tools/prep_zenodo.py @@ -1,8 +1,9 @@ #!/usr/bin/env python3 -import git import json -from subprocess import run, PIPE from pathlib import Path +from subprocess import PIPE, run + +import git skip = {'nibotmi'} @@ -17,12 +18,10 @@ def decommify(name): zenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {} orig_creators = zenodo.get('creators', []) -creator_map = {decommify(creator['name']): creator - for creator in orig_creators} +creator_map = {decommify(creator['name']): creator for creator in orig_creators} shortlog = run(['git', 'shortlog', '-ns'], stdout=PIPE) -counts = [line.split('\t', 1)[::-1] - for line in shortlog.stdout.decode().split('\n') if line] +counts = [line.split('\t', 1)[::-1] for line in shortlog.stdout.decode().split('\n') if line] commit_counts = {} for committer, commits in counts: @@ -31,15 +30,15 @@ def decommify(name): # Stable sort: # Number of commits in reverse order # Ties broken by alphabetical order of first name -committers = [committer - for committer, _ in sorted(commit_counts.items(), - key=lambda x: (-x[1], x[0]))] +committers = [ + committer for committer, _ in sorted(commit_counts.items(), key=lambda x: (-x[1], x[0])) +] creators = [ creator_map.get(committer, {'name': committer}) for committer in committers if committer not in skip - ] +] zenodo['creators'] = creators zenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True) + '\n') diff --git a/tools/profile b/tools/profile index b17ac454cb..cc13d773bc 100755 --- a/tools/profile +++ b/tools/profile @@ -1,6 +1,6 @@ #!/usr/bin/python -#emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -#ex: set sts=4 ts=4 sw=4 et: +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# ex: set sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the NiBabel package for the @@ -15,10 +15,13 @@ import os import sys from os import path -if __name__ == "__main__": +if __name__ == '__main__': - usage = """Usage: %s [options] ... - """ % sys.argv[0] + usage = ( + """Usage: %s [options] ... 
+ """ + % sys.argv[0] + ) # default options convert2kcache = True @@ -37,24 +40,24 @@ if __name__ == "__main__": sys.exit(1) while sys.argv[0].startswith('-'): - if sys.argv[0] in ["-l", "--level"]: + if sys.argv[0] in ['-l', '--level']: profilelevel = int(sys.argv[1]) sys.argv.pop(0) - elif sys.argv[0] in ["-o", "--output-file"]: + elif sys.argv[0] in ['-o', '--output-file']: pfilename = sys.argv[1] sys.argv.pop(0) - elif sys.argv[0] in ["-O", "--output-statsfile"]: + elif sys.argv[0] in ['-O', '--output-statsfile']: pstatsfilename = sys.argv[1] sys.argv.pop(0) - elif sys.argv[0] in ["-s", "--stats"]: + elif sys.argv[0] in ['-s', '--stats']: printstats = True convert2kcache = False displaykcachegrinder = False - elif sys.argv[0] in ["-n", "--no-run"]: + elif sys.argv[0] in ['-n', '--no-run']: run = False - elif sys.argv[0] in ["-P", "--no-profilelines"]: + elif sys.argv[0] in ['-P', '--no-profilelines']: profilelines = False - elif sys.argv[0] in ["-K", "--no-kcache"]: + elif sys.argv[0] in ['-K', '--no-kcache']: convert2kcache = False displaykcachegrinder = False else: @@ -75,13 +78,13 @@ if __name__ == "__main__": raise RuntimeError('No hotshot') if pfilename is None: - pfilename = cmdname + ".prof" + pfilename = cmdname + '.prof' if run: - exec(f"import {root} as runnable") + exec(f'import {root} as runnable') if not 'main' in runnable.__dict__: - print(f"OOPS: file/module {cmdname} has no function main defined") + print(f'OOPS: file/module {cmdname} has no function main defined') sys.exit(1) prof = hotshot.Profile(pfilename, lineevents=profilelines) @@ -89,16 +92,17 @@ if __name__ == "__main__": try: # actually return values are never setup # since unittest.main sys.exit's - results = prof.runcall( runnable.main ) + results = prof.runcall(runnable.main) except SystemExit: pass - print(f"Saving profile data into {pfilename}") + print(f'Saving profile data into {pfilename}') prof.close() if printstats or pstatsfilename: import hotshot.stats - print("Loading profile file to print statistics") + + print('Loading profile file to print statistics') stats = hotshot.stats.load(pfilename) if printstats: stats.strip_dirs() @@ -107,17 +111,17 @@ if __name__ == "__main__": if pstatsfilename: stats.dump_stats(pstatsfilename) - kfilename = pfilename + ".kcache" + kfilename = pfilename + '.kcache' if convert2kcache: - cmd = "hotshot2calltree -o %s %s" % (kfilename, pfilename) + cmd = 'hotshot2calltree -o %s %s' % (kfilename, pfilename) if os.system(cmd): - print("!!! Make sure to install kcachegrind-converters ;-)") + print('!!! Make sure to install kcachegrind-converters ;-)') sys.exit(1) if displaykcachegrinder: if os.system('kcachegrind %s' % kfilename): - print("!!! Make sure to install kcachegrind ;-)") + print('!!! 
Make sure to install kcachegrind ;-)') sys.exit(1) else: - print("Go away -- nothing to look here for as a module") + print('Go away -- nothing to look here for as a module') diff --git a/tools/refresh_readme.py b/tools/refresh_readme.py index 577c10bd36..0567a994ba 100755 --- a/tools/refresh_readme.py +++ b/tools/refresh_readme.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -""" Refresh README.rst file from long description +"""Refresh README.rst file from long description Should be run from nibabel root (containing setup.py) """ diff --git a/tools/update_requirements.py b/tools/update_requirements.py index c624d9a8f8..eb0343bd78 100755 --- a/tools/update_requirements.py +++ b/tools/update_requirements.py @@ -1,35 +1,36 @@ #!/usr/bin/env python3 import sys -import tomli from pathlib import Path +import tomli + if sys.version_info < (3, 6): - print("This script requires Python 3.6 to work correctly") + print('This script requires Python 3.6 to work correctly') sys.exit(1) repo_root = Path(__file__).parent.parent -pyproject_toml = repo_root / "pyproject.toml" -reqs = repo_root / "requirements.txt" -min_reqs = repo_root / "min-requirements.txt" -doc_reqs = repo_root / "doc-requirements.txt" +pyproject_toml = repo_root / 'pyproject.toml' +reqs = repo_root / 'requirements.txt' +min_reqs = repo_root / 'min-requirements.txt' +doc_reqs = repo_root / 'doc-requirements.txt' with open(pyproject_toml, 'rb') as fobj: config = tomli.load(fobj) -requirements = config["project"]["dependencies"] -doc_requirements = config["project"]["optional-dependencies"]["doc"] +requirements = config['project']['dependencies'] +doc_requirements = config['project']['optional-dependencies']['doc'] script_name = Path(__file__).relative_to(repo_root) -lines = [f"# Auto-generated by {script_name}", ""] +lines = [f'# Auto-generated by {script_name}', ''] # Write requirements lines[1:-1] = requirements -reqs.write_text("\n".join(lines)) +reqs.write_text('\n'.join(lines)) # Write minimum requirements -lines[1:-1] = [req.replace(">=", "==").replace("~=", "==") for req in requirements] -min_reqs.write_text("\n".join(lines)) +lines[1:-1] = [req.replace('>=', '==').replace('~=', '==') for req in requirements] +min_reqs.write_text('\n'.join(lines)) # Write documentation requirements -lines[1:-1] = ["-r requirements.txt"] + doc_requirements -doc_reqs.write_text("\n".join(lines)) +lines[1:-1] = ['-r requirements.txt'] + doc_requirements +doc_reqs.write_text('\n'.join(lines)) diff --git a/tools/valgrind-python.supp b/tools/valgrind-python.supp index ef41c6edbe..4679470082 100644 --- a/tools/valgrind-python.supp +++ b/tools/valgrind-python.supp @@ -437,5 +437,3 @@ obj:/lib/ld-2.7.so obj:/lib/i686/cmov/libdl-2.7.so } - - From ab00ba51b39f45e3661a108ef34cbab2e894e343 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 16:07:18 -0500 Subject: [PATCH 149/702] MNT: Update .git-blame-ignore-revs --- .git-blame-ignore-revs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 78015d8cf1..d0546f627f 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,3 +1,5 @@ +# Sun Jan 1 12:38:02 2023 -0500 - effigies@gmail.com - STY: Run pre-commit config on all files +d14c1cf282a9c3b19189f490f10c35f5739e24d1 # Thu Dec 29 22:53:17 2022 -0500 - effigies@gmail.com - STY: Reduce array().astype() and similar constructs bf298113da99079c9c7b5e1690e41879828cd472 # Thu Dec 29 22:32:46 2022 -0500 - effigies@gmail.com - STY: pyupgrade --py37-plus From 
9000943e07ed65ccb8439381aa85e8ecb03428cc Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 2 Jan 2023 12:15:37 -0500 Subject: [PATCH 150/702] MNT: Add flake8 to pre-commit, run prettier --- .pre-commit-config.yaml | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8c884eb2cc..e99b9570d6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,17 +1,17 @@ -exclude: '.*/data/.*' +exclude: ".*/data/.*" repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.1.0 hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-json - - id: check-toml - - id: check-added-large-files - - id: check-case-conflict - - id: check-merge-conflict - - id: check-vcs-permalinks + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-json + - id: check-toml + - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict + - id: check-vcs-permalinks - repo: https://github.com/grantjenks/blue rev: v0.9.1 hooks: @@ -20,3 +20,8 @@ repos: rev: 5.11.2 hooks: - id: isort + - repo: https://github.com/pycqa/flake8 + rev: 6.0.0 + hooks: + - id: flake8 + exclude: "^(doc|nisext|tools)/" From 290e34a1ef44ac2dd2b1d4c71fb3e0b8f68b0b29 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 01:23:05 -0500 Subject: [PATCH 151/702] MNT: Officially deprecate nisext --- nisext/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nisext/__init__.py b/nisext/__init__.py index b556c66d13..6b19d7eb8e 100644 --- a/nisext/__init__.py +++ b/nisext/__init__.py @@ -4,3 +4,10 @@ nibabel uses these routines, and houses them, and installs them. nipy-proper and dipy use them. 
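
The deprecation notice added just below uses a bare ``warnings.warn`` call,
so it surfaces as a ``UserWarning`` the first time ``nisext`` is imported.
A hypothetical way to trigger or silence it from the command line (not part
of the patch itself):

    python -c "import nisext"                          # emits the UserWarning
    python -W "ignore::UserWarning" -c "import nisext"
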
""" + +import warnings + +warnings.warn( + """The nisext package is deprecated as of NiBabel 5.0 and will be fully +removed in NiBabel 6.0""" +) From 122ba9a1b48ce3017bea9895b4fe00cce00766af Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 2 Jan 2023 12:03:58 -0500 Subject: [PATCH 152/702] MNT: Remove versioneer Rename remaining setup.cfg to .flake8 --- setup.cfg => .flake8 | 8 - .gitattributes | 1 - MANIFEST.in | 2 - nibabel/_version.py | 666 ------------- setup.py | 33 - versioneer.py | 2156 ------------------------------------------ 6 files changed, 2866 deletions(-) rename setup.cfg => .flake8 (52%) delete mode 100644 nibabel/_version.py delete mode 100755 setup.py delete mode 100644 versioneer.py diff --git a/setup.cfg b/.flake8 similarity index 52% rename from setup.cfg rename to .flake8 index 4c955c37e1..9fe631ac81 100644 --- a/setup.cfg +++ b/.flake8 @@ -7,11 +7,3 @@ exclude = nibabel/externals/* per-file-ignores = */__init__.py: F401 - -[versioneer] -VCS = git -style = pep440 -versionfile_source = nibabel/_version.py -versionfile_build = nibabel/_version.py -tag_prefix = -parentdir_prefix = diff --git a/.gitattributes b/.gitattributes index 9f3e8c9167..e69de29bb2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +0,0 @@ -nibabel/_version.py export-subst diff --git a/MANIFEST.in b/MANIFEST.in index 381cab34a5..c40f6110bf 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,5 +3,3 @@ include Changelog TODO requirements.txt recursive-include doc * recursive-include bin * recursive-include tools * -include versioneer.py -include nibabel/_version.py diff --git a/nibabel/_version.py b/nibabel/_version.py deleted file mode 100644 index 763dfca168..0000000000 --- a/nibabel/_version.py +++ /dev/null @@ -1,666 +0,0 @@ - -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys -import runpy -from typing import Callable, Dict -import functools - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). 
- git_refnames = "$Format:%d$" - git_full = "$Format:%H$" - git_date = "$Format:%ci$" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "pep440" - cfg.tag_prefix = "" - cfg.parentdir_prefix = "" - cfg.versionfile_source = "nibabel/_version.py" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY: Dict[str, str] = {} -HANDLERS: Dict[str, Dict[str, Callable]] = {} - - -def register_vcs_handler(vcs, method): # decorator - """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - process = None - - popen_kwargs = {} - if sys.platform == "win32": - # This hides the console window if pythonw.exe is used - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - popen_kwargs["startupinfo"] = startupinfo - - for command in commands: - try: - dispcmd = str([command] + args) - # remember shell=False, so use git.cmd on windows, not just git - process = subprocess.Popen([command] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None), **popen_kwargs) - break - except OSError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = process.communicate()[0].strip().decode() - if process.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, process.returncode - return stdout, process.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for _ in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. 
When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - with open(versionfile_abs, "r") as fobj: - for line in fobj: - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - except OSError: - pass - # CJM: Nibabel hack to ensure we can git-archive off-release versions and - # revert to old X.Y.Zdev versions + githash - try: - rel = runpy.run_path(os.path.join(os.path.dirname(versionfile_abs), "info.py")) - keywords["fallback"] = rel["VERSION"] - except (FileNotFoundError, KeyError): - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if "refnames" not in keywords: - raise NotThisMethod("Short version file found") - date = keywords.get("date") - if date is not None: - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = {r.strip() for r in refnames.strip("()").split(",")} - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = {r for r in refs if re.search(r'\d', r)} - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. 
"2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - # Filter out refs that exactly match prefix or that don't start - # with a number once the prefix is stripped (mostly a concern - # when prefix is '') - if not re.match(r'\d', r): - continue - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so inspect ./info.py - if verbose: - print("no suitable tags, falling back to info.VERSION or 0+unknown") - return {"version": keywords.get("fallback", "0+unknown"), - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - # GIT_DIR can interfere with correct operation of Versioneer. - # It may be intended to be passed to the Versioneer-versioned project, - # but that should not change where we get our version from. - env = os.environ.copy() - env.pop("GIT_DIR", None) - runner = functools.partial(runner, env=env) - - _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else [] - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", *MATCH_ARGS], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], - cwd=root) - # --abbrev-ref was added in git-1.6.3 - if rc != 0 or branch_name is None: - raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") - branch_name = branch_name.strip() - - if branch_name == "HEAD": - # If we aren't exactly on a branch, pick a branch which represents - # the current commit. If all else fails, we are on a branchless - # commit. - branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) - # --contains was added in git-1.5.4 - if rc != 0 or branches is None: - raise NotThisMethod("'git branch --contains' returned error") - branches = branches.split("\n") - - # Remove the first line if we're running detached - if "(" in branches[0]: - branches.pop(0) - - # Strip off the leading "* " from the list of branches. - branches = [branch[2:] for branch in branches] - if "master" in branches: - branch_name = "master" - elif not branches: - branch_name = None - else: - # Pick the first branch that is returned. Good or bad. 
- branch_name = branches[0] - - pieces["branch"] = branch_name - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparsable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_branch(pieces): - """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . - - The ".dev0" means not master branch. Note that .dev0 sorts backwards - (a feature branch will appear "older" than the master branch). - - Exceptions: - 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0" - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def pep440_split_post(ver): - """Split pep440 version string at the post-release segment. - - Returns the release segments before the post-release and the - post-release version number (or -1 if no post-release segment is present). - """ - vc = str.split(ver, ".post") - return vc[0], int(vc[1] or 0) if len(vc) == 2 else None - - -def render_pep440_pre(pieces): - """TAG[.postN.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post0.devDISTANCE - """ - if pieces["closest-tag"]: - if pieces["distance"]: - # update the post release segment - tag_version, post_version = pep440_split_post(pieces["closest-tag"]) - rendered = tag_version - if post_version is not None: - rendered += ".post%d.dev%d" % (post_version+1, pieces["distance"]) - else: - rendered += ".post0.dev%d" % (pieces["distance"]) - else: - # no commits, use the tag as the version - rendered = pieces["closest-tag"] - else: - # exception #1 - rendered = "0.post0.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_post_branch(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . - - The ".dev0" means not master branch. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Exceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-branch": - rendered = render_pep440_branch(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-post-branch": - rendered = render_pep440_post_branch(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for _ in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} diff --git a/setup.py b/setup.py deleted file mode 100755 index 2a2f8d8e21..0000000000 --- a/setup.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Setuptools entrypoint - -This file is a basic stub needed to integrate with versioneer, which allows the -version to be retrieved from git and set statically in a built package. - -This file should not be run directly. To install, use: - - pip install . - -To build a package for distribution, use: - - pip install --upgrade build - python -m build -""" - -from setuptools import setup - -import versioneer - -setup( - version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), -) diff --git a/versioneer.py b/versioneer.py deleted file mode 100644 index a7a9d6b84f..0000000000 --- a/versioneer.py +++ /dev/null @@ -1,2156 +0,0 @@ - -# Version: 0.22 - -"""The Versioneer - like a rocketeer, but for versions. - -The Versioneer -============== - -* like a rocketeer, but for versions! -* https://github.com/python-versioneer/python-versioneer -* Brian Warner -* License: Public Domain -* Compatible with: Python 3.6, 3.7, 3.8, 3.9, 3.10 and pypy3 -* [![Latest Version][pypi-image]][pypi-url] -* [![Build Status][travis-image]][travis-url] - -This is a tool for managing a recorded version number in distutils/setuptools-based -python projects. The goal is to remove the tedious and error-prone "update -the embedded version string" step from your release process. Making a new -release should be as easy as recording a new tag in your version-control -system, and maybe making new tarballs. - - -## Quick Install - -* `pip install versioneer` to somewhere in your $PATH -* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md)) -* run `versioneer install` in your source tree, commit the results -* Verify version information with `python setup.py version` - -## Version Identifiers - -Source trees come from a variety of places: - -* a version-control system checkout (mostly used by developers) -* a nightly tarball, produced by build automation -* a snapshot tarball, produced by a web-based VCS browser, like github's - "tarball from tag" feature -* a release tarball, produced by "setup.py sdist", distributed through PyPI - -Within each source tree, the version identifier (either a string or a number, -this tool is format-agnostic) can come from a variety of places: - -* ask the VCS tool itself, e.g. 
"git describe" (for checkouts), which knows - about recent "tags" and an absolute revision-id -* the name of the directory into which the tarball was unpacked -* an expanded VCS keyword ($Id$, etc) -* a `_version.py` created by some earlier build step - -For released software, the version identifier is closely related to a VCS -tag. Some projects use tag names that include more than just the version -string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool -needs to strip the tag prefix to extract the version identifier. For -unreleased software (between tags), the version identifier should provide -enough information to help developers recreate the same tree, while also -giving them an idea of roughly how old the tree is (after version 1.2, before -version 1.3). Many VCS systems can report a description that captures this, -for example `git describe --tags --dirty --always` reports things like -"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the -0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes). - -The version identifier is used for multiple purposes: - -* to allow the module to self-identify its version: `myproject.__version__` -* to choose a name and prefix for a 'setup.py sdist' tarball - -## Theory of Operation - -Versioneer works by adding a special `_version.py` file into your source -tree, where your `__init__.py` can import it. This `_version.py` knows how to -dynamically ask the VCS tool for version information at import time. - -`_version.py` also contains `$Revision$` markers, and the installation -process marks `_version.py` to have this marker rewritten with a tag name -during the `git archive` command. As a result, generated tarballs will -contain enough information to get the proper version. - -To allow `setup.py` to compute a version too, a `versioneer.py` is added to -the top level of your source tree, next to `setup.py` and the `setup.cfg` -that configures it. This overrides several distutils/setuptools commands to -compute the version when invoked, and changes `setup.py build` and `setup.py -sdist` to replace `_version.py` with a small static file that contains just -the generated version data. - -## Installation - -See [INSTALL.md](./INSTALL.md) for detailed installation instructions. - -## Version-String Flavors - -Code which uses Versioneer can learn about its version string at runtime by -importing `_version` from your main `__init__.py` file and running the -`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can -import the top-level `versioneer.py` and run `get_versions()`. - -Both functions return a dictionary with different flavors of version -information: - -* `['version']`: A condensed version string, rendered using the selected - style. This is the most commonly used value for the project's version - string. The default "pep440" style yields strings like `0.11`, - `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section - below for alternative styles. - -* `['full-revisionid']`: detailed revision identifier. For Git, this is the - full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". - -* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the - commit date in ISO 8601 format. This will be None if the date is not - available. - -* `['dirty']`: a boolean, True if the tree has uncommitted changes. 
Note that - this is only accurate if run in a VCS checkout, otherwise it is likely to - be False or None - -* `['error']`: if the version string could not be computed, this will be set - to a string describing the problem, otherwise it will be None. It may be - useful to throw an exception in setup.py if this is set, to avoid e.g. - creating tarballs with a version string of "unknown". - -Some variants are more useful than others. Including `full-revisionid` in a -bug report should allow developers to reconstruct the exact code being tested -(or indicate the presence of local changes that should be shared with the -developers). `version` is suitable for display in an "about" box or a CLI -`--version` output: it can be easily compared against release notes and lists -of bugs fixed in various releases. - -The installer adds the following text to your `__init__.py` to place a basic -version in `YOURPROJECT.__version__`: - - from ._version import get_versions - __version__ = get_versions()['version'] - del get_versions - -## Styles - -The setup.cfg `style=` configuration controls how the VCS information is -rendered into a version string. - -The default style, "pep440", produces a PEP440-compliant string, equal to the -un-prefixed tag name for actual releases, and containing an additional "local -version" section with more detail for in-between builds. For Git, this is -TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags ---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the -tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and -that this commit is two revisions ("+2") beyond the "0.11" tag. For released -software (exactly equal to a known tag), the identifier will only contain the -stripped tag, e.g. "0.11". - -Other styles are available. See [details.md](details.md) in the Versioneer -source tree for descriptions. - -## Debugging - -Versioneer tries to avoid fatal errors: if something goes wrong, it will tend -to return a version of "0+unknown". To investigate the problem, run `setup.py -version`, which will run the version-lookup code in a verbose mode, and will -display the full contents of `get_versions()` (including the `error` string, -which may help identify what went wrong). - -## Known Limitations - -Some situations are known to cause problems for Versioneer. This details the -most significant ones. More can be found on Github -[issues page](https://github.com/python-versioneer/python-versioneer/issues). - -### Subprojects - -Versioneer has limited support for source trees in which `setup.py` is not in -the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are -two common reasons why `setup.py` might not be in the root: - -* Source trees which contain multiple subprojects, such as - [Buildbot](https://github.com/buildbot/buildbot), which contains both - "master" and "slave" subprojects, each with their own `setup.py`, - `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI - distributions (and upload multiple independently-installable tarballs). -* Source trees whose main purpose is to contain a C library, but which also - provide bindings to Python (and perhaps other languages) in subdirectories. - -Versioneer will look for `.git` in parent directories, and most operations -should get the right version string. 
However `pip` and `setuptools` have bugs -and implementation details which frequently cause `pip install .` from a -subproject directory to fail to find a correct version string (so it usually -defaults to `0+unknown`). - -`pip install --editable .` should work correctly. `setup.py install` might -work too. - -Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in -some later version. - -[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking -this issue. The discussion in -[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the -issue from the Versioneer side in more detail. -[pip PR#3176](https://github.com/pypa/pip/pull/3176) and -[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve -pip to let Versioneer work correctly. - -Versioneer-0.16 and earlier only looked for a `.git` directory next to the -`setup.cfg`, so subprojects were completely unsupported with those releases. - -### Editable installs with setuptools <= 18.5 - -`setup.py develop` and `pip install --editable .` allow you to install a -project into a virtualenv once, then continue editing the source code (and -test) without re-installing after every change. - -"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a -convenient way to specify executable scripts that should be installed along -with the python package. - -These both work as expected when using modern setuptools. When using -setuptools-18.5 or earlier, however, certain operations will cause -`pkg_resources.DistributionNotFound` errors when running the entrypoint -script, which must be resolved by re-installing the package. This happens -when the install happens with one version, then the egg_info data is -regenerated while a different version is checked out. Many setup.py commands -cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into -a different virtualenv), so this can be surprising. - -[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes -this one, but upgrading to a newer version of setuptools should probably -resolve it. - - -## Updating Versioneer - -To upgrade your project to a new release of Versioneer, do the following: - -* install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg`, if necessary, to include any new configuration settings - indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. -* re-run `versioneer install` in your source tree, to replace - `SRC/_version.py` -* commit any changed files - -## Future Directions - -This tool is designed to make it easily extended to other version-control -systems: all VCS-specific components are in separate directories like -src/git/ . The top-level `versioneer.py` script is assembled from these -components by running make-versioneer.py . In the future, make-versioneer.py -will take a VCS name as an argument, and will construct a version of -`versioneer.py` that is specific to the given VCS. It might also take the -configuration arguments that are currently provided manually during -installation by editing setup.py . Alternatively, it might go the other -direction and include code from all supported VCS systems, reducing the -number of intermediate scripts. 
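
To make the docstring's version-string examples concrete, this is roughly how
the default pep440 style maps ``git describe`` output (the tag, distance,
hash and dirty flag here are illustrative):

    $ git describe --tags --dirty --always --long
    0.11-2-g1076c97-dirty
    # versioneer's pep440 style would render this as 0.11+2.g1076c97.dirty
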
-
-## Similar projects
-
-* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
-  dependency
-* [miniver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
-  versioneer
-* [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools
-  plugin
-
-## License
-
-To make Versioneer easier to embed, all its code is dedicated to the public
-domain. The `_version.py` that it creates is also in the public domain.
-Specifically, both are released under the Creative Commons "Public Domain
-Dedication" license (CC0-1.0), as described in
-https://creativecommons.org/publicdomain/zero/1.0/ .
-
-[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
-[pypi-url]: https://pypi.python.org/pypi/versioneer/
-[travis-image]:
-https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
-[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
-
-"""
-# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
-# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
-# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error
-# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with
-# pylint:disable=attribute-defined-outside-init,too-many-arguments
-
-import configparser
-import errno
-import json
-import os
-import re
-import subprocess
-import sys
-import runpy
-from typing import Callable, Dict
-import functools
-
-
-class VersioneerConfig:
-    """Container for Versioneer configuration parameters."""
-
-
-def get_root():
-    """Get the project root directory.
-
-    We require that all commands are run from the project root, i.e. the
-    directory that contains setup.py, setup.cfg, and versioneer.py.
-    """
-    root = os.path.realpath(os.path.abspath(os.getcwd()))
-    setup_py = os.path.join(root, "setup.py")
-    versioneer_py = os.path.join(root, "versioneer.py")
-    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
-        # allow 'python path/to/setup.py COMMAND'
-        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
-        setup_py = os.path.join(root, "setup.py")
-        versioneer_py = os.path.join(root, "versioneer.py")
-    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
-        err = ("Versioneer was unable to find the project root directory. "
-               "Versioneer requires setup.py to be executed from "
-               "its immediate directory (like 'python setup.py COMMAND'), "
-               "or in a way that lets it use sys.argv[0] to find the root "
-               "(like 'python path/to/setup.py COMMAND').")
-        raise VersioneerBadRootError(err)
-    try:
-        # Certain runtime workflows (setup.py install/develop in a setuptools
-        # tree) execute all dependencies in a single python process, so
-        # "versioneer" may be imported multiple times, and python's shared
-        # module-import table will cache the first one. So we can't use
-        # os.path.dirname(__file__), as that will find whichever
-        # versioneer.py was first imported, even in later projects.
- my_path = os.path.realpath(os.path.abspath(__file__)) - me_dir = os.path.normcase(os.path.splitext(my_path)[0]) - vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) - if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(my_path), versioneer_py)) - except NameError: - pass - return root - - -def get_config_from_root(root): - """Read the project setup.cfg file to determine Versioneer config.""" - # This might raise OSError (if setup.cfg is missing), or - # configparser.NoSectionError (if it lacks a [versioneer] section), or - # configparser.NoOptionError (if it lacks "VCS="). See the docstring at - # the top of versioneer.py for instructions on writing your setup.cfg . - setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.ConfigParser() - with open(setup_cfg, "r") as cfg_file: - parser.read_file(cfg_file) - VCS = parser.get("versioneer", "VCS") # mandatory - - # Dict-like interface for non-mandatory entries - section = parser["versioneer"] - - cfg = VersioneerConfig() - cfg.VCS = VCS - cfg.style = section.get("style", "") - cfg.versionfile_source = section.get("versionfile_source") - cfg.versionfile_build = section.get("versionfile_build") - cfg.tag_prefix = section.get("tag_prefix") - if cfg.tag_prefix in ("''", '""'): - cfg.tag_prefix = "" - cfg.parentdir_prefix = section.get("parentdir_prefix") - cfg.verbose = section.get("verbose") - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -# these dictionaries contain VCS-specific tools -LONG_VERSION_PY: Dict[str, str] = {} -HANDLERS: Dict[str, Dict[str, Callable]] = {} - - -def register_vcs_handler(vcs, method): # decorator - """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - HANDLERS.setdefault(vcs, {})[method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - process = None - - popen_kwargs = {} - if sys.platform == "win32": - # This hides the console window if pythonw.exe is used - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - popen_kwargs["startupinfo"] = startupinfo - - for command in commands: - try: - dispcmd = str([command] + args) - # remember shell=False, so use git.cmd on windows, not just git - process = subprocess.Popen([command] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None), **popen_kwargs) - break - except OSError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = process.communicate()[0].strip().decode() - if process.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, process.returncode - return stdout, process.returncode - - -LONG_VERSION_PY['git'] = r''' -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). 
Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys -import runpy -from typing import Callable, Dict -import functools - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" - git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" - git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "%(STYLE)s" - cfg.tag_prefix = "%(TAG_PREFIX)s" - cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" - cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY: Dict[str, str] = {} -HANDLERS: Dict[str, Dict[str, Callable]] = {} - - -def register_vcs_handler(vcs, method): # decorator - """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - process = None - - popen_kwargs = {} - if sys.platform == "win32": - # This hides the console window if pythonw.exe is used - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - popen_kwargs["startupinfo"] = startupinfo - - for command in commands: - try: - dispcmd = str([command] + args) - # remember shell=False, so use git.cmd on windows, not just git - process = subprocess.Popen([command] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None), **popen_kwargs) - break - except OSError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %%s" %% dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %%s" %% (commands,)) - return None, None - stdout = process.communicate()[0].strip().decode() - if process.returncode != 0: - if verbose: - print("unable to run %%s (error)" %% dispcmd) - print("stdout was %%s" %% stdout) - return None, process.returncode - return stdout, process.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. 
- - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for _ in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %%s but none started with prefix %%s" %% - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - with open(versionfile_abs, "r") as fobj: - for line in fobj: - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - except OSError: - pass - # CJM: Nibabel hack to ensure we can git-archive off-release versions and - # revert to old X.Y.Zdev versions + githash - try: - rel = runpy.run_path(os.path.join(os.path.dirname(versionfile_abs), "info.py")) - keywords["fallback"] = rel["VERSION"] - except (FileNotFoundError, KeyError): - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if "refnames" not in keywords: - raise NotThisMethod("Short version file found") - date = keywords.get("date") - if date is not None: - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - - # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = {r.strip() for r in refnames.strip("()").split(",")} - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. 
The old git %%d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = {r for r in refs if re.search(r'\d', r)} - if verbose: - print("discarding '%%s', no digits" %% ",".join(refs - tags)) - if verbose: - print("likely tags: %%s" %% ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - # Filter out refs that exactly match prefix or that don't start - # with a number once the prefix is stripped (mostly a concern - # when prefix is '') - if not re.match(r'\d', r): - continue - if verbose: - print("picking %%s" %% r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so inspect ./info.py - if verbose: - print("no suitable tags, falling back to info.VERSION or 0+unknown") - return {"version": keywords.get("fallback", "0+unknown"), - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - # GIT_DIR can interfere with correct operation of Versioneer. - # It may be intended to be passed to the Versioneer-versioned project, - # but that should not change where we get our version from. - env = os.environ.copy() - env.pop("GIT_DIR", None) - runner = functools.partial(runner, env=env) - - _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %%s not under git control" %% root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - MATCH_ARGS = ["--match", "%%s*" %% tag_prefix] if tag_prefix else [] - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", *MATCH_ARGS], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], - cwd=root) - # --abbrev-ref was added in git-1.6.3 - if rc != 0 or branch_name is None: - raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") - branch_name = branch_name.strip() - - if branch_name == "HEAD": - # If we aren't exactly on a branch, pick a branch which represents - # the current commit. If all else fails, we are on a branchless - # commit. 
- branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) - # --contains was added in git-1.5.4 - if rc != 0 or branches is None: - raise NotThisMethod("'git branch --contains' returned error") - branches = branches.split("\n") - - # Remove the first line if we're running detached - if "(" in branches[0]: - branches.pop(0) - - # Strip off the leading "* " from the list of branches. - branches = [branch[2:] for branch in branches] - if "master" in branches: - branch_name = "master" - elif not branches: - branch_name = None - else: - # Pick the first branch that is returned. Good or bad. - branch_name = branches[0] - - pieces["branch"] = branch_name - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparsable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%%s'" - %% describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%%s' doesn't start with prefix '%%s'" - print(fmt %% (full_tag, tag_prefix)) - pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" - %% (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_branch(pieces): - """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . - - The ".dev0" means not master branch. Note that .dev0 sorts backwards - (a feature branch will appear "older" than the master branch). - - Exceptions: - 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0" - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def pep440_split_post(ver): - """Split pep440 version string at the post-release segment. - - Returns the release segments before the post-release and the - post-release version number (or -1 if no post-release segment is present). - """ - vc = str.split(ver, ".post") - return vc[0], int(vc[1] or 0) if len(vc) == 2 else None - - -def render_pep440_pre(pieces): - """TAG[.postN.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post0.devDISTANCE - """ - if pieces["closest-tag"]: - if pieces["distance"]: - # update the post release segment - tag_version, post_version = pep440_split_post(pieces["closest-tag"]) - rendered = tag_version - if post_version is not None: - rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"]) - else: - rendered += ".post0.dev%%d" %% (pieces["distance"]) - else: - # no commits, use the tag as the version - rendered = pieces["closest-tag"] - else: - # exception #1 - rendered = "0.post0.dev%%d" %% pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - return rendered - - -def render_pep440_post_branch(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . - - The ".dev0" means not master branch. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Exceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-branch": - rendered = render_pep440_branch(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-post-branch": - rendered = render_pep440_post_branch(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%%s'" %% style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for _ in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} -''' - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - with open(versionfile_abs, "r") as fobj: - for line in fobj: - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - except OSError: - pass - # CJM: Nibabel hack to ensure we can git-archive off-release versions and - # revert to old X.Y.Zdev versions + githash - try: - rel = runpy.run_path(os.path.join(os.path.dirname(versionfile_abs), "info.py")) - keywords["fallback"] = rel["VERSION"] - except (FileNotFoundError, KeyError): - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if "refnames" not in keywords: - raise NotThisMethod("Short version file found") - date = keywords.get("date") - if date is not None: - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = {r.strip() for r in refnames.strip("()").split(",")} - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. 
By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = {r for r in refs if re.search(r'\d', r)} - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - # Filter out refs that exactly match prefix or that don't start - # with a number once the prefix is stripped (mostly a concern - # when prefix is '') - if not re.match(r'\d', r): - continue - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so inspect ./info.py - if verbose: - print("no suitable tags, falling back to info.VERSION or 0+unknown") - return {"version": keywords.get("fallback", "0+unknown"), - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - # GIT_DIR can interfere with correct operation of Versioneer. - # It may be intended to be passed to the Versioneer-versioned project, - # but that should not change where we get our version from. - env = os.environ.copy() - env.pop("GIT_DIR", None) - runner = functools.partial(runner, env=env) - - _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else [] - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", *MATCH_ARGS], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], - cwd=root) - # --abbrev-ref was added in git-1.6.3 - if rc != 0 or branch_name is None: - raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") - branch_name = branch_name.strip() - - if branch_name == "HEAD": - # If we aren't exactly on a branch, pick a branch which represents - # the current commit. If all else fails, we are on a branchless - # commit. 
- branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) - # --contains was added in git-1.5.4 - if rc != 0 or branches is None: - raise NotThisMethod("'git branch --contains' returned error") - branches = branches.split("\n") - - # Remove the first line if we're running detached - if "(" in branches[0]: - branches.pop(0) - - # Strip off the leading "* " from the list of branches. - branches = [branch[2:] for branch in branches] - if "master" in branches: - branch_name = "master" - elif not branches: - branch_name = None - else: - # Pick the first branch that is returned. Good or bad. - branch_name = branches[0] - - pieces["branch"] = branch_name - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparsable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def do_vcs_install(manifest_in, versionfile_source, ipy): - """Git-specific installation logic for Versioneer. - - For Git, this means creating/changing .gitattributes to mark _version.py - for export-subst keyword substitution. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - files = [manifest_in, versionfile_source] - if ipy: - files.append(ipy) - try: - my_path = __file__ - if my_path.endswith(".pyc") or my_path.endswith(".pyo"): - my_path = os.path.splitext(my_path)[0] + ".py" - versioneer_file = os.path.relpath(my_path) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) - present = False - try: - with open(".gitattributes", "r") as fobj: - for line in fobj: - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - break - except OSError: - pass - if not present: - with open(".gitattributes", "a+") as fobj: - fobj.write(f"{versionfile_source} export-subst\n") - files.append(".gitattributes") - run_command(GITS, ["add", "--"] + files) - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. 
- - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for _ in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.22) from -# revision-control system data, or from the parent directory name of an -# unpacked source archive. Distribution tarballs contain a pre-generated copy -# of this file. - -import json - -version_json = ''' -%s -''' # END VERSION_JSON - - -def get_versions(): - return json.loads(version_json) -""" - - -def versions_from_file(filename): - """Try to determine the version from _version.py if present.""" - try: - with open(filename) as f: - contents = f.read() - except OSError: - raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - raise NotThisMethod("no version_json in _version.py") - return json.loads(mo.group(1)) - - -def write_to_version_file(filename, versions): - """Write the given version number to the given _version.py file.""" - os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) - with open(filename, "w") as f: - f.write(SHORT_VERSION_PY % contents) - - print("set %s to '%s'" % (filename, versions["version"])) - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_branch(pieces): - """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . - - The ".dev0" means not master branch. Note that .dev0 sorts backwards - (a feature branch will appear "older" than the master branch). - - Exceptions: - 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0" - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def pep440_split_post(ver): - """Split pep440 version string at the post-release segment. - - Returns the release segments before the post-release and the - post-release version number (or -1 if no post-release segment is present). - """ - vc = str.split(ver, ".post") - return vc[0], int(vc[1] or 0) if len(vc) == 2 else None - - -def render_pep440_pre(pieces): - """TAG[.postN.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post0.devDISTANCE - """ - if pieces["closest-tag"]: - if pieces["distance"]: - # update the post release segment - tag_version, post_version = pep440_split_post(pieces["closest-tag"]) - rendered = tag_version - if post_version is not None: - rendered += ".post%d.dev%d" % (post_version+1, pieces["distance"]) - else: - rendered += ".post0.dev%d" % (pieces["distance"]) - else: - # no commits, use the tag as the version - rendered = pieces["closest-tag"] - else: - # exception #1 - rendered = "0.post0.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_post_branch(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . - - The ".dev0" means not master branch. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Exceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-branch": - rendered = render_pep440_branch(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-post-branch": - rendered = render_pep440_post_branch(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -class VersioneerBadRootError(Exception): - """The project root directory is unknown or missing key files.""" - - -def get_versions(verbose=False): - """Get the project version from whatever source is available. - - Returns dict with two keys: 'version' and 'full'. - """ - if "versioneer" in sys.modules: - # see the discussion in cmdclass.py:get_cmdclass() - del sys.modules["versioneer"] - - root = get_root() - cfg = get_config_from_root(root) - - assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" - handlers = HANDLERS.get(cfg.VCS) - assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" - assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" - - versionfile_abs = os.path.join(root, cfg.versionfile_source) - - # extract version from first of: _version.py, VCS command (e.g. 'git - # describe'), parentdir. 
This is meant to work for developers using a
-    # source checkout, for users of a tarball created by 'setup.py sdist',
-    # and for users of a tarball/zipball created by 'git archive' or github's
-    # download-from-tag feature or the equivalent in other VCSes.
-
-    get_keywords_f = handlers.get("get_keywords")
-    from_keywords_f = handlers.get("keywords")
-    if get_keywords_f and from_keywords_f:
-        try:
-            keywords = get_keywords_f(versionfile_abs)
-            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
-            if verbose:
-                print("got version from expanded keyword %s" % ver)
-            return ver
-        except NotThisMethod:
-            pass
-
-    try:
-        ver = versions_from_file(versionfile_abs)
-        if verbose:
-            print("got version from file %s %s" % (versionfile_abs, ver))
-        return ver
-    except NotThisMethod:
-        pass
-
-    from_vcs_f = handlers.get("pieces_from_vcs")
-    if from_vcs_f:
-        try:
-            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
-            ver = render(pieces, cfg.style)
-            if verbose:
-                print("got version from VCS %s" % ver)
-            return ver
-        except NotThisMethod:
-            pass
-
-    try:
-        if cfg.parentdir_prefix:
-            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
-            if verbose:
-                print("got version from parentdir %s" % ver)
-            return ver
-    except NotThisMethod:
-        pass
-
-    if verbose:
-        print("unable to compute version")
-
-    return {"version": "0+unknown", "full-revisionid": None,
-            "dirty": None, "error": "unable to compute version",
-            "date": None}
-
-
-def get_version():
-    """Get the short version string for this project."""
-    return get_versions()["version"]
-
-
-def get_cmdclass(cmdclass=None):
-    """Get the custom setuptools/distutils subclasses used by Versioneer.
-
-    If the package uses a different cmdclass (e.g. one from numpy), it
-    should be provided as an argument.
-    """
-    if "versioneer" in sys.modules:
-        del sys.modules["versioneer"]
-        # this fixes the "python setup.py develop" case (also 'install' and
-        # 'easy_install .'), in which subdependencies of the main project are
-        # built (using setup.py bdist_egg) in the same python process. Assume
-        # a main project A and a dependency B, which use different versions
-        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
-        # sys.modules by the time B's setup.py is executed, causing B to run
-        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
-        # sandbox that restores sys.modules to its pre-build state, so the
-        # parent is protected against the child's "import versioneer". By
-        # removing ourselves from sys.modules here, before the child build
-        # happens, we protect the child from the parent's versioneer too.
- # Also see https://github.com/python-versioneer/python-versioneer/issues/52 - - cmds = {} if cmdclass is None else cmdclass.copy() - - # we add "version" to both distutils and setuptools - try: - from setuptools import Command - except ImportError: - from distutils.core import Command - - class cmd_version(Command): - description = "report generated version string" - user_options = [] - boolean_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - vers = get_versions(verbose=True) - print("Version: %s" % vers["version"]) - print(" full-revisionid: %s" % vers.get("full-revisionid")) - print(" dirty: %s" % vers.get("dirty")) - print(" date: %s" % vers.get("date")) - if vers["error"]: - print(" error: %s" % vers["error"]) - cmds["version"] = cmd_version - - # we override "build_py" in both distutils and setuptools - # - # most invocation pathways end up running build_py: - # distutils/build -> build_py - # distutils/install -> distutils/build ->.. - # setuptools/bdist_wheel -> distutils/install ->.. - # setuptools/bdist_egg -> distutils/install_lib -> build_py - # setuptools/install -> bdist_egg ->.. - # setuptools/develop -> ? - # pip install: - # copies source tree to a tempdir before running egg_info/etc - # if .git isn't copied too, 'git describe' will fail - # then does setup.py bdist_wheel, or sometimes setup.py install - # setup.py egg_info -> ? - - # we override different "build_py" commands for both environments - if 'build_py' in cmds: - _build_py = cmds['build_py'] - elif "setuptools" in sys.modules: - from setuptools.command.build_py import build_py as _build_py - else: - from distutils.command.build_py import build_py as _build_py - - class cmd_build_py(_build_py): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - _build_py.run(self) - # now locate _version.py in the new build/ directory and replace - # it with an updated value - if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - cmds["build_py"] = cmd_build_py - - if 'build_ext' in cmds: - _build_ext = cmds['build_ext'] - elif "setuptools" in sys.modules: - from setuptools.command.build_ext import build_ext as _build_ext - else: - from distutils.command.build_ext import build_ext as _build_ext - - class cmd_build_ext(_build_ext): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - _build_ext.run(self) - if self.inplace: - # build_ext --inplace will only build extensions in - # build/lib<..> dir with no _version.py to write to. - # As in place builds will already have a _version.py - # in the module dir, we do not need to write one. - return - # now locate _version.py in the new build/ directory and replace - # it with an updated value - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - cmds["build_ext"] = cmd_build_ext - - if "cx_Freeze" in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe - # nczeczulin reports that py2exe won't like the pep440-style string - # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. 
- # setup(console=[{ - # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION - # "product_version": versioneer.get_version(), - # ... - - class cmd_build_exe(_build_exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _build_exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["build_exe"] = cmd_build_exe - del cmds["build_py"] - - if 'py2exe' in sys.modules: # py2exe enabled? - from py2exe.distutils_buildexe import py2exe as _py2exe - - class cmd_py2exe(_py2exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _py2exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["py2exe"] = cmd_py2exe - - # we override different "sdist" commands for both environments - if 'sdist' in cmds: - _sdist = cmds['sdist'] - elif "setuptools" in sys.modules: - from setuptools.command.sdist import sdist as _sdist - else: - from distutils.command.sdist import sdist as _sdist - - class cmd_sdist(_sdist): - def run(self): - versions = get_versions() - self._versioneer_generated_versions = versions - # unless we update this, the command will keep using the old - # version - self.distribution.metadata.version = versions["version"] - return _sdist.run(self) - - def make_release_tree(self, base_dir, files): - root = get_root() - cfg = get_config_from_root(root) - _sdist.make_release_tree(self, base_dir, files) - # now locate _version.py in the new base_dir directory - # (remembering that it may be a hardlink) and replace it with an - # updated value - target_versionfile = os.path.join(base_dir, cfg.versionfile_source) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) - cmds["sdist"] = cmd_sdist - - return cmds - - -CONFIG_ERROR = """ -setup.cfg is missing the necessary Versioneer configuration. You need -a section like: - - [versioneer] - VCS = git - style = pep440 - versionfile_source = src/myproject/_version.py - versionfile_build = myproject/_version.py - tag_prefix = - parentdir_prefix = myproject- - -You will also need to edit your setup.py to use the results: - - import versioneer - setup(version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), ...) - -Please read the docstring in ./versioneer.py for configuration instructions, -edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. -""" - -SAMPLE_CONFIG = """ -# See the docstring in versioneer.py for instructions. Note that you must -# re-run 'versioneer.py setup' after changing this section, and commit the -# resulting files. 
- -[versioneer] -#VCS = git -#style = pep440 -#versionfile_source = -#versionfile_build = -#tag_prefix = -#parentdir_prefix = - -""" - -OLD_SNIPPET = """ -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions -""" - -INIT_PY_SNIPPET = """ -from . import {0} -__version__ = {0}.get_versions()['version'] -""" - - -def do_setup(): - """Do main VCS-independent setup function for installing Versioneer.""" - root = get_root() - try: - cfg = get_config_from_root(root) - except (OSError, configparser.NoSectionError, - configparser.NoOptionError) as e: - if isinstance(e, (OSError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) - with open(os.path.join(root, "setup.cfg"), "a") as f: - f.write(SAMPLE_CONFIG) - print(CONFIG_ERROR, file=sys.stderr) - return 1 - - print(" creating %s" % cfg.versionfile_source) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") - if os.path.exists(ipy): - try: - with open(ipy, "r") as f: - old = f.read() - except OSError: - old = "" - module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] - snippet = INIT_PY_SNIPPET.format(module) - if OLD_SNIPPET in old: - print(" replacing boilerplate in %s" % ipy) - with open(ipy, "w") as f: - f.write(old.replace(OLD_SNIPPET, snippet)) - elif snippet not in old: - print(" appending to %s" % ipy) - with open(ipy, "a") as f: - f.write(snippet) - else: - print(" %s unmodified" % ipy) - else: - print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make sure both the top-level "versioneer.py" and versionfile_source - # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so - # they'll be copied into source distributions. Pip won't be able to - # install the package without this. - manifest_in = os.path.join(root, "MANIFEST.in") - simple_includes = set() - try: - with open(manifest_in, "r") as f: - for line in f: - if line.startswith("include "): - for include in line.split()[1:]: - simple_includes.add(include) - except OSError: - pass - # That doesn't cover everything MANIFEST.in can do - # (http://docs.python.org/2/distutils/sourcedist.html#commands), so - # it might give some false negatives. Appending redundant 'include' - # lines is safe, though. - if "versioneer.py" not in simple_includes: - print(" appending 'versioneer.py' to MANIFEST.in") - with open(manifest_in, "a") as f: - f.write("include versioneer.py\n") - else: - print(" 'versioneer.py' already in MANIFEST.in") - if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) - with open(manifest_in, "a") as f: - f.write("include %s\n" % cfg.versionfile_source) - else: - print(" versionfile_source already in MANIFEST.in") - - # Make VCS-specific changes. For git, this means creating/changing - # .gitattributes to mark _version.py for export-subst keyword - # substitution. 
- do_vcs_install(manifest_in, cfg.versionfile_source, ipy) - return 0 - - -def scan_setup_py(): - """Validate the contents of setup.py against Versioneer's expectations.""" - found = set() - setters = False - errors = 0 - with open("setup.py", "r") as f: - for line in f.readlines(): - if "import versioneer" in line: - found.add("import") - if "versioneer.get_cmdclass()" in line: - found.add("cmdclass") - if "versioneer.get_version()" in line: - found.add("get_version") - if "versioneer.VCS" in line: - setters = True - if "versioneer.versionfile_source" in line: - setters = True - if len(found) != 3: - print("") - print("Your setup.py appears to be missing some important items") - print("(but I might be wrong). Please make sure it has something") - print("roughly like the following:") - print("") - print(" import versioneer") - print(" setup( version=versioneer.get_version(),") - print(" cmdclass=versioneer.get_cmdclass(), ...)") - print("") - errors += 1 - if setters: - print("You should remove lines like 'versioneer.VCS = ' and") - print("'versioneer.versionfile_source = ' . This configuration") - print("now lives in setup.cfg, and should be removed from setup.py") - print("") - errors += 1 - return errors - - -if __name__ == "__main__": - cmd = sys.argv[1] - if cmd == "setup": - errors = do_setup() - errors += scan_setup_py() - if errors: - sys.exit(1) From 42a8f3d7b25cb79fe590964410bd871aaaac425c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 14:32:21 -0500 Subject: [PATCH 153/702] MNT: Add setuptools_scm config --- .git_archival.txt | 4 ++++ .gitattributes | 1 + .gitignore | 4 ++++ pyproject.toml | 11 +++++++---- 4 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 .git_archival.txt diff --git a/.git_archival.txt b/.git_archival.txt new file mode 100644 index 0000000000..62556c5202 --- /dev/null +++ b/.git_archival.txt @@ -0,0 +1,4 @@ +node: $Format:%H$ +node-date: $Format:%cI$ +describe-name: $Format:%(describe:match=[0-9]*)$ +ref-names: $Format:%D$ diff --git a/.gitattributes b/.gitattributes index e69de29bb2..a94cb2f8c2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -0,0 +1 @@ +.git_archival.txt export-subst diff --git a/.gitignore b/.gitignore index 744dc5becd..f644bb9182 100644 --- a/.gitignore +++ b/.gitignore @@ -88,3 +88,7 @@ venv/ .buildbot.patch .vscode for_testing/ + +# Generated by setuptools_scm # +############################### +_version.py diff --git a/pyproject.toml b/pyproject.toml index f98e03119f..8ac1d1d575 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] -requires = ["setuptools"] -build-backend = "setuptools.build_meta:__legacy__" +requires = ["setuptools", "setuptools_scm[toml]>=6.2"] +build-backend = "setuptools.build_meta" [project] name = "nibabel" @@ -24,7 +24,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Topic :: Scientific/Engineering", ] -# Version from versioneer +# Version from setuptools_scm dynamic = ["version"] [project.urls] @@ -95,4 +95,7 @@ force-exclude = """ [tool.isort] profile = "black" line_length = 99 -extend_skip = ["_version.py", "externals", "versioneer.py"] +extend_skip = ["_version.py", "externals"] + +[tool.setuptools_scm] +write_to = "nibabel/_version.py" From 296e4feb970577f8155516cbe9c9624ebc11582a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 15:37:04 -0500 Subject: [PATCH 154/702] MNT: Load __version__ from setuptools_scm file --- nibabel/pkg_info.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff 
--git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 010e4107ac..579c21f12e 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -2,9 +2,10 @@ from packaging.version import Version -from . import _version - -__version__ = _version.get_versions()['version'] +try: + from ._version import __version__ +except ImportError: + __version__ = '0+unknown' def _cmp(a, b): From 4c1da7960ff3f7aa9fd009140a122effe60d4137 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 16:19:50 -0500 Subject: [PATCH 155/702] MNT: Remove manifest and package data spec These are now handled by setuptools_scm. Also, clarify the expected top-level packages in find --- MANIFEST.in | 5 ----- pyproject.toml | 7 ++----- 2 files changed, 2 insertions(+), 10 deletions(-) delete mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index c40f6110bf..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,5 +0,0 @@ -include AUTHOR COPYING Makefile* MANIFEST.in setup* README.* pyproject.toml -include Changelog TODO requirements.txt -recursive-include doc * -recursive-include bin * -recursive-include tools * diff --git a/pyproject.toml b/pyproject.toml index 8ac1d1d575..934dedec95 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,11 +75,8 @@ platforms = ["OS Independent"] provides = ["nibabel", "nisext"] zip-safe = false -[tool.setuptools.packages] -find = {} - -[tool.setuptools.package-data] -nibabel = ["tests/data/*", "*/tests/data/*", "benchmarks/pytest.benchmark.ini"] +[tool.setuptools.packages.find] +include = ["nibabel*", "nisext*"] [tool.blue] line_length = 99 From 63b55864ff61b96b569e4234f3a2fb53b4094798 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 2 Jan 2023 20:45:50 -0500 Subject: [PATCH 156/702] RF: Deprecate tmpdirs.TemporaryDirectory, duplicate of tempfile.TemporaryDirectory Move examples into __init__ to suppress warnings during doctests --- nibabel/tmpdirs.py | 55 +++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index cb921418e3..31e76c04e1 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -9,46 +9,40 @@ """Contexts for *with* statement providing temporary directories """ import os -import shutil -from tempfile import mkdtemp, template +import tempfile +from .deprecated import deprecate_with_version -class TemporaryDirectory: + +class TemporaryDirectory(tempfile.TemporaryDirectory): """Create and return a temporary directory. This has the same behavior as mkdtemp but can be used as a context manager. Upon exiting the context, the directory and everything contained in it are removed. - - Examples - -------- - >>> import os - >>> with TemporaryDirectory() as tmpdir: - ... fname = os.path.join(tmpdir, 'example_file.txt') - ... with open(fname, 'wt') as fobj: - ... _ = fobj.write('a string\\n') - >>> os.path.exists(tmpdir) - False """ - def __init__(self, suffix='', prefix=template, dir=None): - self.name = mkdtemp(suffix, prefix, dir) - self._closed = False - - def __enter__(self): - return self.name - - def cleanup(self): - if not self._closed: - shutil.rmtree(self.name) - self._closed = True - - def __exit__(self, exc, value, tb): - self.cleanup() - return False + @deprecate_with_version( + 'Please use the standard library tempfile.TemporaryDirectory', + '5.0', + '7.0', + ) + def __init__(self, suffix='', prefix=tempfile.template, dir=None): + """ + Examples + -------- + >>> import os + >>> with TemporaryDirectory() as tmpdir: + ... 
fname = os.path.join(tmpdir, 'example_file.txt') + ... with open(fname, 'wt') as fobj: + ... _ = fobj.write('a string\\n') + >>> os.path.exists(tmpdir) + False + """ + return super().__init__(suffix, prefix, dir) -class InTemporaryDirectory(TemporaryDirectory): +class InTemporaryDirectory(tempfile.TemporaryDirectory): """Create, return, and change directory to a temporary directory Notes @@ -60,9 +54,10 @@ class InTemporaryDirectory(TemporaryDirectory): Examples -------- >>> import os + >>> from pathlib import Path >>> my_cwd = os.getcwd() >>> with InTemporaryDirectory() as tmpdir: - ... _ = open('test.txt', 'wt').write('some text') + ... _ = Path('test.txt').write_text('some text') ... assert os.path.isfile('test.txt') ... assert os.path.isfile(os.path.join(tmpdir, 'test.txt')) >>> os.path.exists(tmpdir) From bb376b122b62b66d75a31bf7a6cc50ca889701c0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 2 Jan 2023 23:40:04 -0500 Subject: [PATCH 157/702] RF: Reimplement tmpdirs tools as contextmanagers Simplifying these tools as wrapped functions makes the logic easier to read. --- nibabel/tmpdirs.py | 64 ++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 31e76c04e1..5a8eccfa2c 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -10,6 +10,19 @@ """ import os import tempfile +from contextlib import contextmanager + +try: + from contextlib import chdir as _chdir +except ImportError: # PY310 + + @contextmanager + def _chdir(path): + cwd = os.getcwd() + os.chdir(path) + yield + os.chdir(cwd) + from .deprecated import deprecate_with_version @@ -42,7 +55,8 @@ def __init__(self, suffix='', prefix=tempfile.template, dir=None): return super().__init__(suffix, prefix, dir) -class InTemporaryDirectory(tempfile.TemporaryDirectory): +@contextmanager +def InTemporaryDirectory(): """Create, return, and change directory to a temporary directory Notes @@ -65,18 +79,12 @@ class InTemporaryDirectory(tempfile.TemporaryDirectory): >>> os.getcwd() == my_cwd True """ - - def __enter__(self): - self._pwd = os.getcwd() - os.chdir(self.name) - return super().__enter__() - - def __exit__(self, exc, value, tb): - os.chdir(self._pwd) - return super().__exit__(exc, value, tb) + with tempfile.TemporaryDirectory() as tmpdir, _chdir(tmpdir): + yield tmpdir -class InGivenDirectory: +@contextmanager +def InGivenDirectory(path=None): """Change directory to given directory for duration of ``with`` block Useful when you want to use `InTemporaryDirectory` for the final test, but @@ -98,27 +106,15 @@ class InGivenDirectory: You can then look at the temporary file outputs to debug what is happening, fix, and finally replace ``InGivenDirectory`` with ``InTemporaryDirectory`` again. - """ - - def __init__(self, path=None): - """Initialize directory context manager - Parameters - ---------- - path : None or str, optional - path to change directory to, for duration of ``with`` block. - Defaults to ``os.getcwd()`` if None - """ - if path is None: - path = os.getcwd() - self.path = os.path.abspath(path) - - def __enter__(self): - self._pwd = os.path.abspath(os.getcwd()) - if not os.path.isdir(self.path): - os.mkdir(self.path) - os.chdir(self.path) - return self.path - - def __exit__(self, exc, value, tb): - os.chdir(self._pwd) + Parameters + ---------- + path : None or str, optional + path to change directory to, for duration of ``with`` block. 
+ Defaults to ``os.getcwd()`` if None + """ + if path is None: + path = os.getcwd() + os.makedirs(path, exist_ok=True) + with _chdir(path): + yield os.path.abspath(path) From 27b757b51b13e4d62c46f2f9ac628e30e27306f4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 00:42:27 -0500 Subject: [PATCH 158/702] MNT: Schedule final removal of TemporaryDirectory --- nibabel/tests/test_removalschedule.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 939895abbd..db99ae3a46 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -14,6 +14,12 @@ ] OBJECT_SCHEDULE = [ + ( + '8.0.0', + [ + ('nibabel.tmpdirs', 'TemporaryDirectory'), + ], + ), ( '7.0.0', [ From 5cc66aa2afefe94050b231961609b34029b80a2f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 16:58:44 -0500 Subject: [PATCH 159/702] RF: Update pkg_info to work with setuptools_scm --- .gitattributes | 1 + nibabel/pkg_info.py | 48 ++++++++++++++++++++++++++++----------------- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/.gitattributes b/.gitattributes index a94cb2f8c2..919c815795 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,2 @@ .git_archival.txt export-subst +nibabel/pkg_info.py export-subst diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 579c21f12e..cf69117b5a 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -1,4 +1,5 @@ import sys +from subprocess import run from packaging.version import Version @@ -8,6 +9,9 @@ __version__ = '0+unknown' +COMMIT_HASH = '$Format:%h$' + + def _cmp(a, b): """Implementation of ``cmp`` for Python 3""" return (a > b) - (a < b) @@ -64,15 +68,20 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): return _cmp(Version(version_str), Version(pkg_version_str)) -def pkg_commit_hash(pkg_path=None): +def pkg_commit_hash(pkg_path: str = None): """Get short form of commit hash - Versioneer placed a ``_version.py`` file in the package directory. This file - gets updated on installation or ``git archive``. - We inspect the contents of ``_version`` to detect whether we are in a - repository, an archive of the repository, or an installed package. + In this file is a variable called COMMIT_HASH. This contains a substitution + pattern that may have been filled by the execution of ``git archive``. 
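+    (For illustration: in a ``git archive`` tarball, the ``export-subst``
+    attribute set in ``.gitattributes`` above rewrites that pattern, so
+    ``COMMIT_HASH`` holds a short hash such as ``'dcf4f96'`` rather than the
+    literal ``'$Format:%h$'``.)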
+
+    We get the commit hash from (in order of preference):
 
-    If detection fails, we return a not-found placeholder tuple
+    * A substituted value in ``COMMIT_HASH``
+    * A truncated commit hash value that is part of the local portion of the
+      version
+    * git's output, if we are in a git repository
+
+    If all these fail, we return a not-found placeholder tuple
 
     Parameters
     ----------
@@ -86,17 +95,20 @@ def pkg_commit_hash(pkg_path=None):
     hash_str : str
        short form of hash
     """
-    versions = _version.get_versions()
-    hash_str = versions['full-revisionid'][:7]
-    if hasattr(_version, 'version_json'):
-        hash_from = 'installation'
-    elif not _version.get_keywords()['full'].startswith('$Format:'):
-        hash_from = 'archive substitution'
-    elif versions['version'] == '0+unknown':
-        hash_from, hash_str = '(none found)', ''
-    else:
-        hash_from = 'repository'
-    return hash_from, hash_str
+    if not COMMIT_HASH.startswith('$Format'):  # it has been substituted
+        return 'archive substitution', COMMIT_HASH
+    ver = Version(__version__)
+    if ver.local is not None and ver.local.startswith('g'):
+        return 'installation', ver.local[1:8]
+    # maybe we are in a repository
+    proc = run(
+        ('git', 'rev-parse', '--short', 'HEAD'),
+        capture_output=True,
+        cwd=pkg_path,
+    )
+    if proc.stdout:
+        return 'repository', proc.stdout.strip()
+    return '(none found)', ''
 
 
 def get_pkg_info(pkg_path):
     """Return dict describing the context of this package
 
     Parameters
@@ -112,7 +124,7 @@ def get_pkg_info(pkg_path):
     context : dict
        with named parameters of interest
     """
-    src, hsh = pkg_commit_hash()
+    src, hsh = pkg_commit_hash(pkg_path)
     import numpy
 
     return dict(

From 9b6705d3439f2702773b4175fa83fce1917a9690 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Sun, 1 Jan 2023 17:10:38 -0500
Subject: [PATCH 160/702] RF: Drop now-unused info.VERSION

---
 nibabel/info.py                | 13 -------------
 nibabel/tests/test_pkg_info.py | 19 -------------------
 2 files changed, 32 deletions(-)

diff --git a/nibabel/info.py b/nibabel/info.py
index bdd291728a..96031ac954 100644
--- a/nibabel/info.py
+++ b/nibabel/info.py
@@ -6,19 +6,6 @@
 relative imports.
 """
 
-# nibabel version information
-# This is a fall-back for versioneer when installing from a git archive.
-# This should be set to the intended next version + dev to indicate a
-# development (pre-release) version.
-_version_major = 5
-_version_minor = 0
-_version_micro = 0
-_version_extra = '.dev0'
-
-# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
-VERSION = f'{_version_major}.{_version_minor}.{_version_micro}{_version_extra}'
-
-
 # Note: this long_description is the canonical place to edit this text.
 # It also appears in README.rst, but it should get there by running
 # ``tools/refresh_readme.py`` which pulls in this version.
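
A minimal sketch of the resolution order that pkg_commit_hash() above now
implements (the hash values and the '+g1a2b3c4' local version segment are
illustrative, not taken from a real build):

    from nibabel.pkg_info import pkg_commit_hash

    source, short_hash = pkg_commit_hash()
    # from a ``git archive`` tarball:                ('archive substitution', 'dcf4f96')
    # from an installed version ending '+g1a2b3c4':  ('installation', '1a2b3c4')
    # from a git checkout:                           ('repository', 'dcf4f96')
    # with no information available:                 ('(none found)', '')
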
diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index 32059c68d8..0d8146fdb0 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -7,8 +7,6 @@ import nibabel as nib from nibabel.pkg_info import cmp_pkg_version -from ..info import VERSION - def test_pkg_info(): """Smoke test nibabel.get_info() @@ -26,23 +24,6 @@ def test_version(): assert nib.pkg_info.__version__ == nib.__version__ -def test_fallback_version(): - """Test fallback version is up-to-date - - This should only fail if we fail to bump nibabel.info.VERSION immediately - after release - """ - ver = Version(nib.__version__) - fallback = Version(VERSION) - assert ( - # Releases have no local information, archive matches versioneer - ver.local is None - or - # dev version should be larger than tag+commit-githash - fallback >= ver - ), 'nibabel.info.VERSION does not match latest tag information' - - def test_cmp_pkg_version_0(): # Test version comparator assert cmp_pkg_version(nib.__version__) == 0 From cf564cf5c84927a21306a273fa72f59d72296bef Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 1 Jan 2023 23:04:16 -0500 Subject: [PATCH 161/702] MNT: Add type annotations to other pkg_info functions --- nibabel/pkg_info.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index cf69117b5a..068600b4e6 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import sys from subprocess import run @@ -12,12 +14,12 @@ COMMIT_HASH = '$Format:%h$' -def _cmp(a, b): +def _cmp(a, b) -> int: """Implementation of ``cmp`` for Python 3""" return (a > b) - (a < b) -def cmp_pkg_version(version_str, pkg_version_str=__version__): +def cmp_pkg_version(version_str: str, pkg_version_str: str = __version__) -> int: """Compare ``version_str`` to current package version This comparator follows `PEP-440`_ conventions for determining version @@ -68,7 +70,7 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): return _cmp(Version(version_str), Version(pkg_version_str)) -def pkg_commit_hash(pkg_path: str = None): +def pkg_commit_hash(pkg_path: str = None) -> tuple[str, str]: """Get short form of commit hash In this file is a variable called COMMIT_HASH. This contains a substitution @@ -111,7 +113,7 @@ def pkg_commit_hash(pkg_path: str = None): return '(none found)', '' -def get_pkg_info(pkg_path): +def get_pkg_info(pkg_path: str) -> dict: """Return dict describing the context of this package Parameters From 81a4316438fd9fab14e0718ce9a790c72cb051c1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 12:32:14 -0500 Subject: [PATCH 162/702] DOC: Fix bare back-ticks in changelog --- Changelog | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/Changelog b/Changelog index 8cb27e84f1..d1cad17aa8 100644 --- a/Changelog +++ b/Changelog @@ -652,8 +652,8 @@ Enhancements Bug fixes --------- -* Preserve first point of `LazyTractogram` (pr/588) (MC, reviewed by Nil - Goyette, CM, MB) +* Preserve first point of :py:class:`~nibabel.streamlines.LazyTractogram` + (pr/588) (MC, reviewed by Nil Goyette, CM, MB) * Stop adding extraneous metadata padding (pr/593) (Jon Stutters, reviewed by CM, MB) * Accept lower-case orientation codes in TRK files (pr/600) (Kesshi Jordan, @@ -727,7 +727,7 @@ Enhancements * Support for alternative header field name variants in .PAR files (pr/507) (Gregory R. 
Lee) * Various enhancements to streamlines API by MC: support for reading TRK - version 1 (pr/512); concatenation of tractograms using `+`/`+=` operators + version 1 (pr/512); concatenation of tractograms using ``+``/``+=`` operators (pr/495); function to concatenate multiple ArraySequence objects (pr/494) * Support for numpy 1.12 (pr/500, pr/502) (MC, MB) * Allow dtype specifiers as fileslice input (pr/485) (MB) @@ -807,7 +807,7 @@ Enhancements (Brendan Moloney); * More explicit error when trying to read image from non-existent file (pr/455) (Ariel Rokem); -* Extension to `nib-ls` command to show image statistics (pr/437) and other +* Extension to ``nib-ls`` command to show image statistics (pr/437) and other header files (pr/348) (Yarik Halchenko). Bug fixes @@ -1100,7 +1100,7 @@ Special thanks to Chris Burns, Jarrod Millman and Yaroslav Halchenko. * ``parrec2nii`` script to convert PAR/REC images to NIfTI format (MH) * Very preliminary, limited and highly experimental DICOM reading support (MB, Ian Nimmo Smith). -* Some functions (`nibabel.funcs`) for basic image shape changes, including +* Some functions (:py:mod:`nibabel.funcs`) for basic image shape changes, including the ability to transform to the image with data closest to the cononical image orientation (first axis left-to-right, second back-to-front, third down-to-up) (MB, Jonathan Taylor) @@ -1143,7 +1143,7 @@ visiting the URL:: * Bugfix: Removed left-over print statement in extension code. * Bugfix: Prevent saving of bogus 'None.nii' images when the filename was previously assign, before calling NiftiImage.save() (Closes: #517920). -* Bugfix: Extension length was to short for all `edata` whose length matches +* Bugfix: Extension length was to short for all ``edata`` whose length matches n*16-8, for all integer n. 0.20090205.1 (Thu, 5 Feb 2009) @@ -1152,13 +1152,13 @@ visiting the URL:: * This release is the first in a series that aims stabilize the API and finally result in PyNIfTI 1.0 with full support of the NIfTI1 standard. * The whole package was restructured. The included renaming - `nifti.nifti(image,format,clibs)` to `nifti.(image,format,clibs)`. Redirect + ``nifti.nifti(image,format,clibs)`` to ``nifti.(image,format,clibs)``. Redirect modules make sure that existing user code will not break, but they will issue a DeprecationWarning and will be removed with the release of PyNIfTI 1.0. * Added a special extension that can embed any serializable Python object into the NIfTI file header. The contents of this extension is - automatically expanded upon request into the `.meta` attribute of each + automatically expanded upon request into the ``.meta`` attribute of each NiftiImage. When saving files to disk the content of the dictionary is also automatically dumped into this extension. Embedded meta data is not loaded automatically, since this has security @@ -1173,11 +1173,11 @@ visiting the URL:: * Added methods :meth:`~nifti.format.NiftiFormat.vx2q` and :meth:`~nifti.format.NiftiFormat.vx2s` to convert voxel indices into coordinates defined by qform or sform respectively. -* Updating the `cal_min` and `cal_max` values in the NIfTI header when +* Updating the ``cal_min`` and ``cal_max`` values in the NIfTI header when saving a file is now conditional, but remains enabled by default. * Full set of methods to query and modify axis units. This includes - expanding the previous `xyzt_units` field in the header dictionary into - editable `xyz_unit` and `time_unit` fields. 
The former `xyzt_units` field + expanding the previous ``xyzt_units`` field in the header dictionary into + editable ``xyz_unit`` and ``time_unit`` fields. The former ``xyzt_units`` field is no longer available. See: :meth:`~nifti.format.NiftiFormat.getXYZUnit`, :meth:`~nifti.format.NiftiFormat.setXYZUnit`, @@ -1193,19 +1193,19 @@ visiting the URL:: :attr:`~nifti.format.NiftiFormat.qform_code`, :attr:`~nifti.format.NiftiFormat.sform_code` * Each image instance is now able to generate a human-readable dump of its - most important header information via `__str__()`. + most important header information via ``__str__()``. * :class:`~nifti.image.NiftiImage` objects can now be pickled. * Switched to NumPy's distutils for building the package. Cleaned and simplified the build procedure. Added optimization flags to SWIG call. * :attr:`nifti.image.NiftiImage.filename` can now also be used to assign a filename. * Introduced :data:`nifti.__version__` as canonical version string. -* Removed `updateQFormFromQuarternion()` from the list of public methods of +* Removed ``updateQFormFromQuarternion()`` from the list of public methods of :class:`~nifti.format.NiftiFormat`. This is an internal method that should not be used in user code. However, a redirect to the new method will remain in-place until PyNIfTI 1.0. * Bugfix: :meth:`~nifti.image.NiftiImage.getScaledData` returns a - unmodified data array if `slope` is set to zero (as required by the NIfTI + unmodified data array if ``slope`` is set to zero (as required by the NIfTI standard). Thanks to Thomas Ross for reporting. * Bugfix: Unicode filenames are now handled properly, as long as they do not contain pure-unicode characters (since the NIfTI library does not support @@ -1259,9 +1259,9 @@ visiting the URL:: Thyreau for reporting. * Bugfix: setPixDims() stored meaningless values. * Use new NumPy API and replace deprecated function calls - (`PyArray_FromDimsAndData`). + (``PyArray_FromDimsAndData``). * Initial support for memory mapped access to uncompressed NIfTI files - (`MemMappedNiftiImage`). + (``MemMappedNiftiImage``). * Add a proper Makefile and setup.cfg for compiling PyNIfTI under Windows with MinGW. * Include a minimal copy of the most recent nifticlibs (just libniftiio and From a63b3db3261fbfd211c91de436809ea2115234cd Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 12:32:38 -0500 Subject: [PATCH 163/702] DOC: Get correct version from setuptools_scm _version.py --- doc/tools/build_modref_templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py index 2fded8fbfc..11eae99741 100755 --- a/doc/tools/build_modref_templates.py +++ b/doc/tools/build_modref_templates.py @@ -57,7 +57,7 @@ def abort(error): from runpy import run_path try: - source_version = run_path(version_file)['get_versions']()['version'] + source_version = run_path(version_file)['version'] except (FileNotFoundError, KeyError): pass if source_version == '0+unknown': From e662e551da657f58bff90152fda5c7d1c98af32b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 12:34:19 -0500 Subject: [PATCH 164/702] DOC: Update copyright year to 2023 --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 04ac32483b..82fe25adac 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -94,7 +94,7 @@ # General information about the project. 
project = 'NiBabel' -copyright = f"2006-2022, {authors['name']} <{authors['email']}>" +copyright = f"2006-2023, {authors['name']} <{authors['email']}>" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the From ca5cad2a21b7b4301fc6cec374c64d2d060f62e1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 00:37:03 -0500 Subject: [PATCH 165/702] ENH: Add function to schedule a warning to become an error --- nibabel/deprecated.py | 39 ++++++++++++++++++++++++++++++++ nibabel/tests/test_deprecated.py | 32 +++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index aa41675dbd..ab9f31c8cf 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -1,7 +1,9 @@ """Module to help with deprecating objects and classes """ +from __future__ import annotations import warnings +from typing import Type from .deprecator import Deprecator from .pkg_info import cmp_pkg_version @@ -77,3 +79,40 @@ class VisibleDeprecationWarning(UserWarning): deprecate_with_version = Deprecator(cmp_pkg_version) + + +def alert_future_error( + msg: str, + version: str, + *, + warning_class: Type[Warning] = FutureWarning, + error_class: Type[Exception] = RuntimeError, + warning_rec: str = '', + error_rec: str = '', + stacklevel: int = 2, +): + """Warn or error with appropriate messages for changing functionality. + + Parameters + ---------- + msg : str + Description of the condition that led to the alert + version : str + NiBabel version at which the warning will become an error + warning_class : subclass of Warning, optional + Warning class to emit before version + error_class : subclass of Exception, optional + Error class to emit after version + warning_rec : str, optional + Guidance for suppressing the warning and avoiding the future error + error_rec: str, optional + Guidance for resolving the error + stacklevel: int, optional + Warnings stacklevel to provide; note that this will be incremented by + 1, so provide the stacklevel you would provide directly to warnings.warn() + """ + if cmp_pkg_version(version) >= 0: + msg = f'{msg} This will error in NiBabel {version}. 
{warning_rec}' + warnings.warn(msg.strip(), warning_class, stacklevel=stacklevel + 1) + else: + raise error_class(f'{msg} {error_rec}'.strip()) diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index 962f9c0827..8b9f6c360f 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -6,7 +6,12 @@ import pytest from nibabel import pkg_info -from nibabel.deprecated import FutureWarningMixin, ModuleProxy, deprecate_with_version +from nibabel.deprecated import ( + FutureWarningMixin, + ModuleProxy, + alert_future_error, + deprecate_with_version, +) from nibabel.tests.test_deprecator import TestDeprecatorFunc as _TestDF @@ -79,3 +84,28 @@ def func(): assert func() == 99 finally: pkg_info.cmp_pkg_version.__defaults__ = ('2.0',) + + +def test_alert_future_error(): + with pytest.warns(FutureWarning): + alert_future_error( + 'Message', + '9999.9.9', + warning_rec='Silence this warning by doing XYZ.', + error_rec='Fix this issue by doing XYZ.', + ) + with pytest.raises(RuntimeError): + alert_future_error( + 'Message', + '1.0.0', + warning_rec='Silence this warning by doing XYZ.', + error_rec='Fix this issue by doing XYZ.', + ) + with pytest.raises(ValueError): + alert_future_error( + 'Message', + '1.0.0', + warning_rec='Silence this warning by doing XYZ.', + error_rec='Fix this issue by doing XYZ.', + error_class=ValueError, + ) From 0aeecbc3b31ab44508a7a448250db018519a3663 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 00:37:56 -0500 Subject: [PATCH 166/702] ENH: Schedule int64 warning to convert to error at 5.0 --- nibabel/nifti1.py | 14 +++++++++----- nibabel/tests/test_nifti1.py | 14 ++++++++++---- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index a10686145b..61a6da3660 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -21,6 +21,7 @@ from .arrayproxy import get_obj_dtype from .batteryrunners import Report from .casting import have_binary128 +from .deprecated import alert_future_error from .filebasedimages import SerializableImage from .optpkg import optional_package from .quaternions import fillpositive, mat2quat, quat2mat @@ -1831,13 +1832,16 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtyp # already fail. danger_dts = (np.dtype('int64'), np.dtype('uint64')) if header is None and dtype is None and get_obj_dtype(dataobj) in danger_dts: - msg = ( + alert_future_error( f'Image data has type {dataobj.dtype}, which may cause ' - 'incompatibilities with other tools. This will error in ' - 'NiBabel 5.0. This warning can be silenced ' - f'by passing the dtype argument to {self.__class__.__name__}().' 
+ 'incompatibilities with other tools.', + '5.0', + warning_rec='This warning can be silenced by passing the dtype argument' + f' to {self.__class__.__name__}().', + error_rec='To use this type, pass an explicit header or dtype argument' + f' to {self.__class__.__name__}().', + error_class=ValueError, ) - warnings.warn(msg, FutureWarning, stacklevel=2) super().__init__(dataobj, affine, header, extra, file_map, dtype) # Force set of s/q form when header is None unless affine is also None if header is None and affine is not None: diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 2cbbfc1f5d..808d06c15a 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -35,6 +35,7 @@ slice_order_codes, ) from nibabel.optpkg import optional_package +from nibabel.pkg_info import cmp_pkg_version from nibabel.spatialimages import HeaderDataError from nibabel.tmpdirs import InTemporaryDirectory @@ -766,16 +767,21 @@ class TestNifti1Pair(tana.TestAnalyzeImage, tspm.ImageScalingMixin): image_class = Nifti1Pair supported_np_types = TestNifti1PairHeader.supported_np_types - def test_int64_warning(self): + def test_int64_warning_or_error(self): # Verify that initializing with (u)int64 data and no - # header/dtype info produces a warning + # header/dtype info produces a warning/error img_klass = self.image_class hdr_klass = img_klass.header_class for dtype in (np.int64, np.uint64): data = np.arange(24, dtype=dtype).reshape((2, 3, 4)) - with pytest.warns(FutureWarning): + # Starts as a warning, transitions to error at 5.0 + if cmp_pkg_version('5.0') < 0: + cm = pytest.raises(ValueError) + else: + cm = pytest.warns(FutureWarning) + with cm: img_klass(data, np.eye(4)) - # No warnings if we're explicit, though + # No problems if we're explicit, though with clear_and_catch_warnings(): warnings.simplefilter('error') img_klass(data, np.eye(4), dtype=dtype) From 56db684206568ad1d5e18e2de537e5090fec2ba3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 5 Jan 2023 21:54:09 -0500 Subject: [PATCH 167/702] MNT: Switch to hatch build backend --- pyproject.toml | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 934dedec95..c5b6c35899 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] -requires = ["setuptools", "setuptools_scm[toml]>=6.2"] -build-backend = "setuptools.build_meta" +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" [project] name = "nibabel" @@ -70,13 +70,22 @@ test = [ ] zstd = ["pyzstd >= 0.14.3"] -[tool.setuptools] -platforms = ["OS Independent"] -provides = ["nibabel", "nisext"] -zip-safe = false +[tool.hatch.build.targets.sdist] +exclude = [".git_archival.txt"] -[tool.setuptools.packages.find] -include = ["nibabel*", "nisext*"] +[tool.hatch.build.targets.wheel] +packages = ["nibabel", "nisext"] +exclude = [ + # 56MB test file does not need to be installed everywhere + "nibabel/nicom/tests/data/4d_multiframe_test.dcm", +] + +[tool.hatch.version] +source = "vcs" +raw-options = { version_scheme = "release-branch-semver" } + +[tool.hatch.build.hooks.vcs] +version-file = "nibabel/_version.py" [tool.blue] line_length = 99 @@ -93,6 +102,3 @@ force-exclude = """ profile = "black" line_length = 99 extend_skip = ["_version.py", "externals"] - -[tool.setuptools_scm] -write_to = "nibabel/_version.py" From 41eebc16185479f5012bef951256f2f18b18e66c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 5 Jan 2023 22:22:37 -0500 
Subject: [PATCH 168/702] MNT: Require Python 3.8, numpy 1.19 --- min-requirements.txt | 2 +- pyproject.toml | 6 +++--- requirements.txt | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/min-requirements.txt b/min-requirements.txt index 8308f6e076..305f16dcbd 100644 --- a/min-requirements.txt +++ b/min-requirements.txt @@ -1,4 +1,4 @@ # Auto-generated by tools/update_requirements.py -numpy ==1.17 +numpy ==1.19 packaging ==17 setuptools diff --git a/pyproject.toml b/pyproject.toml index 934dedec95..278aa58b6b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,8 +9,8 @@ authors = [{ name = "NiBabel developers", email = "neuroimaging@python.org" }] maintainers = [{ name = "Christopher Markiewicz" }] readme = "README.rst" license = { text = "MIT License" } -requires-python = ">=3.7" -dependencies = ["numpy >=1.17", "packaging >=17", "setuptools"] +requires-python = ">=3.8" +dependencies = ["numpy >=1.19", "packaging >=17", "setuptools"] classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", @@ -18,10 +18,10 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering", ] # Version from setuptools_scm diff --git a/requirements.txt b/requirements.txt index 2c77ae1e0d..1d1e434609 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ # Auto-generated by tools/update_requirements.py -numpy >=1.17 +numpy >=1.19 packaging >=17 setuptools From c2e2feffba64353090d3afbf96c56bd467919c99 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 5 Jan 2023 22:23:40 -0500 Subject: [PATCH 169/702] CI: Stop testing 3.7 --- .github/workflows/stable.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 7cf7aaab43..0c560bcb4d 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -91,7 +91,7 @@ jobs: strategy: matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: [3.7, 3.8, 3.9, "3.10", "3.11"] + python-version: [3.8, 3.9, "3.10", "3.11"] architecture: ['x64', 'x86'] install: ['pip'] check: ['test'] @@ -101,7 +101,7 @@ jobs: include: # Basic dependencies only - os: ubuntu-latest - python-version: 3.7 + python-version: 3.8 install: pip check: test pip-flags: '' @@ -109,7 +109,7 @@ jobs: optional-depends: '' # Absolute minimum dependencies - os: ubuntu-latest - python-version: 3.7 + python-version: 3.8 install: pip check: test pip-flags: '' @@ -117,7 +117,7 @@ jobs: optional-depends: '' # Absolute minimum dependencies plus old MPL, Pydicom, Pillow - os: ubuntu-latest - python-version: 3.7 + python-version: 3.8 install: pip check: test pip-flags: '' @@ -125,7 +125,7 @@ jobs: optional-depends: MIN_OPT_DEPENDS # Clean install imports only with package-declared dependencies - os: ubuntu-latest - python-version: 3.7 + python-version: 3.8 install: pip check: skiptests pip-flags: '' From 620e74b1da932dbe249929a07e30faf2927bf26f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 07:41:48 -0500 Subject: [PATCH 170/702] MNT: Move think-before-you-act gitignores out of top level --- .gitignore | 20 +++----------------- nibabel/.gitignore | 16 ++++++++++++++++ 2 files changed, 19 
insertions(+), 17 deletions(-) create mode 100644 nibabel/.gitignore diff --git a/.gitignore b/.gitignore index f644bb9182..983dd5aecf 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# See also nibabel/.gitignore for test data rules that +# are meant to make you think before you `git add --force` + # Editor temporary/working/backup files # ######################################### .#* @@ -36,23 +39,6 @@ *.py[oc] *.so -# Packages # -############ -# it's better to unpack these files and commit the raw source -# git has its own built in compression methods -*.7z -*.bz2 -*.bzip2 -*.dmg -*.gz -*.iso -*.jar -*.rar -*.tar -*.tbz2 -*.tgz -*.zip - # Python files # ################ build/ diff --git a/nibabel/.gitignore b/nibabel/.gitignore new file mode 100644 index 0000000000..a89322fea3 --- /dev/null +++ b/nibabel/.gitignore @@ -0,0 +1,16 @@ +# Packages # +############ +# it's better to unpack these files and commit the raw source +# git has its own built in compression methods +*.7z +*.bz2 +*.bzip2 +*.dmg +*.gz +*.iso +*.jar +*.rar +*.tar +*.tbz2 +*.tgz +*.zip From 299290ced8c0a041155e9aa034e1814cab761442 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 07:42:51 -0500 Subject: [PATCH 171/702] MNT: Add some caching entries to gitignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 983dd5aecf..4e9cf81029 100644 --- a/.gitignore +++ b/.gitignore @@ -51,12 +51,14 @@ dist/ .coverage .ropeproject/ htmlcov/ +.*_cache/ # Logs and databases # ###################### *.log *.sql *.sqlite +*.sqlite3 # OS generated files # ###################### From cfa209a3e721a4d180700161e95f479c7a84706a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 07:53:39 -0500 Subject: [PATCH 172/702] TEST: Mark large data file test as xfail --- nibabel/nicom/tests/test_dicomwrappers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 3efa7f3aab..62076c042a 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -622,12 +622,14 @@ def test_image_position(self): assert MFW(fake_mf).image_position.dtype == float @dicom_test + @pytest.mark.xfail(reason='Not packaged in install', raises=FileNotFoundError) def test_affine(self): # Make sure we find orientation/position/spacing info dw = didw.wrapper_from_file(DATA_FILE_4D) aff = dw.affine @dicom_test + @pytest.mark.xfail(reason='Not packaged in install', raises=FileNotFoundError) def test_data_real(self): # The data in this file is (initially) a 1D gradient so it compresses # well. This just tests that the data ordering produces a consistent From 8439ce54c96a3dee78ee9104edfc51baf2ca9d33 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Thu, 16 Jun 2022 14:19:37 +0100 Subject: [PATCH 173/702] DOC: Remove DataobjImage docs from FileBasedImage --- nibabel/filebasedimages.py | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index e37a698f2f..938a17d7c3 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -74,7 +74,6 @@ class FileBasedImage: properties: - * shape * header methods: @@ -118,25 +117,6 @@ class FileBasedImage: img.to_file_map() - You can get the data out again with:: - - img.get_fdata() - - Less commonly, for some image types that support it, you might want to - fetch out the unscaled array via the object containing the data:: - - unscaled_data = img.dataoobj.get_unscaled() - - Analyze-type images (including nifti) support this, but others may not - (MINC, for example). - - Sometimes you might to avoid any loss of precision by making the - data type the same as the input:: - - hdr = img.header - hdr.set_data_dtype(data.dtype) - img.to_filename(fname) - **Files interface** The image has an attribute ``file_map``. This is a mapping, that has keys @@ -158,7 +138,7 @@ class FileBasedImage: contain enough information so that an existing image instance can save itself back to the files pointed to in ``file_map``. When a file holder holds active file-like objects, then these may be affected by the - initial file read; in this case, the contains file-like objects need to + initial file read; in this case, the file-like objects need to carry the position at which a write (with ``to_file_map``) should place the data. The ``file_map`` contents should therefore be such, that this will work. From 5b972c8d74d160f65f7cb6e1c2b2713a06405da4 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 16 Jun 2022 14:44:18 +0100 Subject: [PATCH 174/702] MNT: Initial mypy configuration --- pyproject.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 5a1162a5a6..2658d42329 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -102,3 +102,9 @@ force-exclude = """ profile = "black" line_length = 99 extend_skip = ["_version.py", "externals"] + +[tool.mypy] +python_version = "3.11" +exclude = [ + "/tests", +] From 2d7d1b241dff9ed3303bfcde14a20307fb449852 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Jun 2022 09:10:25 +0100 Subject: [PATCH 175/702] CI: Add typing test to GitHub actions --- .github/workflows/misc.yml | 2 +- pyproject.toml | 1 + tools/ci/check.sh | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 1890488008..ade350aaa7 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -24,7 +24,7 @@ jobs: matrix: python-version: ["3.10"] install: ['pip'] - check: ['style', 'doctest'] + check: ['style', 'doctest', 'typing'] pip-flags: [''] depends: ['REQUIREMENTS'] env: diff --git a/pyproject.toml b/pyproject.toml index 2658d42329..6d44c607ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,6 +68,7 @@ test = [ "pytest-httpserver", "pytest-xdist", ] +typing = ["mypy", "pytest", "types-setuptools", "types-Pillow", "pydicom"] zstd = ["pyzstd >= 0.14.3"] [tool.hatch.build.targets.sdist] diff --git a/tools/ci/check.sh b/tools/ci/check.sh index 3cfc1e5530..bcb1a934e2 100755 --- a/tools/ci/check.sh +++ b/tools/ci/check.sh @@ -25,6 +25,8 @@ elif [ "${CHECK_TYPE}" == "test" ]; then cp ../.coveragerc . 
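     # -n auto (below) fans the test suite out across all available CPUs via
     # pytest-xdist, which the [test] extra installs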
pytest --doctest-modules --doctest-plus --cov nibabel --cov-report xml \ --junitxml=test-results.xml -v --pyargs nibabel -n auto +elif [ "${CHECK_TYPE}" == "typing" ]; then + mypy nibabel else false fi From 24d5acb719b17264f72330a5948d7350a76b8ccb Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 2 Jan 2023 00:32:20 -0500 Subject: [PATCH 176/702] ENH: Add type annotations to pacify mypy --- nibabel/analyze.py | 17 +++++++----- .../benchmarks/bench_arrayproxy_slicing.py | 2 +- nibabel/brikhead.py | 1 - nibabel/casting.py | 3 ++- nibabel/cmdline/dicomfs.py | 2 +- nibabel/ecat.py | 4 +-- nibabel/externals/netcdf.py | 4 +-- nibabel/filebasedimages.py | 16 ++++++----- nibabel/freesurfer/mghformat.py | 4 +-- nibabel/gifti/gifti.py | 15 +++++++---- nibabel/minc1.py | 12 +++++---- nibabel/minc2.py | 2 +- nibabel/nicom/dicomwrappers.py | 2 -- nibabel/nifti1.py | 14 +++++----- nibabel/openers.py | 2 +- nibabel/parrec.py | 4 +-- nibabel/pkg_info.py | 4 +-- nibabel/pydicom_compat.py | 27 +++++++++---------- nibabel/spatialimages.py | 5 +++- nibabel/spm99analyze.py | 2 +- nibabel/testing/np_features.py | 16 +++-------- nibabel/tmpdirs.py | 2 +- nibabel/volumeutils.py | 7 ++--- nibabel/wrapstruct.py | 6 +++-- 24 files changed, 90 insertions(+), 83 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index e165112259..fc44693bc6 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -81,6 +81,9 @@ can be loaded with and without a default flip, so the saved zoom will not constrain the affine. """ +from __future__ import annotations + +from typing import Type import numpy as np @@ -88,7 +91,7 @@ from .arraywriters import ArrayWriter, WriterError, get_slope_inter, make_array_writer from .batteryrunners import Report from .fileholders import copy_file_map -from .spatialimages import HeaderDataError, HeaderTypeError, SpatialImage +from .spatialimages import HeaderDataError, HeaderTypeError, SpatialHeader, SpatialImage from .volumeutils import ( apply_read_scaling, array_from_file, @@ -131,7 +134,7 @@ ('glmax', 'i4'), ('glmin', 'i4'), ] -data_history_dtd = [ +data_history_dtd: list[tuple[str, str] | tuple[str, str, tuple[int, ...]]] = [ ('descrip', 'S80'), ('aux_file', 'S24'), ('orient', 'S1'), @@ -172,7 +175,7 @@ data_type_codes = make_dt_codes(_dtdefs) -class AnalyzeHeader(LabeledWrapStruct): +class AnalyzeHeader(LabeledWrapStruct, SpatialHeader): """Class for basic analyze header Implements zoom-only setting of affine transform, and no image @@ -892,11 +895,11 @@ def may_contain_header(klass, binaryblock): class AnalyzeImage(SpatialImage): """Class for basic Analyze format image""" - header_class = AnalyzeHeader + header_class: Type[AnalyzeHeader] = AnalyzeHeader _meta_sniff_len = header_class.sizeof_hdr - files_types = (('image', '.img'), ('header', '.hdr')) - valid_exts = ('.img', '.hdr') - _compressed_suffixes = ('.gz', '.bz2', '.zst') + files_types: tuple[tuple[str, str], ...] = (('image', '.img'), ('header', '.hdr')) + valid_exts: tuple[str, ...] = ('.img', '.hdr') + _compressed_suffixes: tuple[str, ...] 
= ('.gz', '.bz2', '.zst') makeable = True rw = True diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index d313a7db5e..958923d7ea 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -26,7 +26,7 @@ # if memory_profiler is installed, we get memory usage results try: - from memory_profiler import memory_usage + from memory_profiler import memory_usage # type: ignore except ImportError: memory_usage = None diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 470ed16664..54b6d021f3 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -27,7 +27,6 @@ am aware) always be >= 1. This permits sub-brick indexing common in AFNI programs (e.g., example4d+orig'[0]'). """ - import os import re from copy import deepcopy diff --git a/nibabel/casting.py b/nibabel/casting.py index a17a25a2c8..6232c615b5 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -3,6 +3,7 @@ Most routines work round some numpy oddities in floating point precision and casting. Others work round numpy casting to and from python ints """ +from __future__ import annotations import warnings from numbers import Integral @@ -110,7 +111,7 @@ def float_to_int(arr, int_type, nan2zero=True, infmax=False): # Cache range values -_SHARED_RANGES = {} +_SHARED_RANGES: dict[tuple[type, type], tuple[np.number, np.number]] = {} def shared_range(flt_type, int_type): diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index 8de1438544..85d7d8dcad 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -25,7 +25,7 @@ class dummy_fuse: try: - import fuse + import fuse # type: ignore uid = os.getuid() gid = os.getgid() diff --git a/nibabel/ecat.py b/nibabel/ecat.py index de81d8bbe8..8b11e881a7 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -50,7 +50,7 @@ from .arraywriters import make_array_writer from .fileslice import canonical_slicers, predict_shape, slice2outax -from .spatialimages import SpatialImage +from .spatialimages import SpatialHeader, SpatialImage from .volumeutils import array_from_file, make_dt_codes, native_code, swapped_code from .wrapstruct import WrapStruct @@ -243,7 +243,7 @@ patient_orient_neurological = [1, 3, 5, 7] -class EcatHeader(WrapStruct): +class EcatHeader(WrapStruct, SpatialHeader): """Class for basic Ecat PET header Sub-parts of standard Ecat File diff --git a/nibabel/externals/netcdf.py b/nibabel/externals/netcdf.py index 2fddcf03d9..b8d1244c0c 100644 --- a/nibabel/externals/netcdf.py +++ b/nibabel/externals/netcdf.py @@ -871,6 +871,7 @@ def __setattr__(self, attr, value): pass self.__dict__[attr] = value + @property def isrec(self): """Returns whether the variable has a record dimension or not. @@ -881,8 +882,8 @@ def isrec(self): """ return bool(self.data.shape) and not self._shape[0] - isrec = property(isrec) + @property def shape(self): """Returns the shape tuple of the data variable. @@ -890,7 +891,6 @@ def shape(self): same manner of other numpy arrays. 
""" return self.data.shape - shape = property(shape) def getValue(self): """ diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 938a17d7c3..598a735d23 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -7,9 +7,11 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Common interface for any image format--volume or surface, binary or xml.""" +from __future__ import annotations import io from copy import deepcopy +from typing import Type from urllib import request from .fileholders import FileHolder @@ -144,14 +146,14 @@ class FileBasedImage: work. """ - header_class = FileBasedHeader - _meta_sniff_len = 0 - files_types = (('image', None),) - valid_exts = () - _compressed_suffixes = () + header_class: Type[FileBasedHeader] = FileBasedHeader + _meta_sniff_len: int = 0 + files_types: tuple[tuple[str, str | None], ...] = (('image', None),) + valid_exts: tuple[str, ...] = () + _compressed_suffixes: tuple[str, ...] = () - makeable = True # Used in test code - rw = True # Used in test code + makeable: bool = True # Used in test code + rw: bool = True # Used in test code def __init__(self, header=None, extra=None, file_map=None): """Initialize image diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index cb86b4400b..6b97056524 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -21,7 +21,7 @@ from ..fileholders import FileHolder from ..filename_parser import _stringify_path from ..openers import ImageOpener -from ..spatialimages import HeaderDataError, SpatialImage +from ..spatialimages import HeaderDataError, SpatialHeader, SpatialImage from ..volumeutils import Recoder, array_from_file, array_to_file, endian_codes from ..wrapstruct import LabeledWrapStruct @@ -87,7 +87,7 @@ class MGHError(Exception): """ -class MGHHeader(LabeledWrapStruct): +class MGHHeader(LabeledWrapStruct, SpatialHeader): """Class for MGH format header The header also consists of the footer data which MGH places after the data diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index c80fbf2e22..919e4faef2 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -11,10 +11,12 @@ The Gifti specification was (at time of writing) available as a PDF download from http://www.nitrc.org/projects/gifti/ """ +from __future__ import annotations import base64 import sys import warnings +from typing import Type import numpy as np @@ -577,7 +579,7 @@ class GiftiImage(xml.XmlSerializable, SerializableImage): # The parser will in due course be a GiftiImageParser, but we can't set # that now, because it would result in a circular import. We set it after # the class has been defined, at the end of the class definition. 
- parser = None + parser: Type[xml.XmlParser] def __init__( self, @@ -832,7 +834,7 @@ def _to_xml_element(self): GIFTI.append(dar._to_xml_element()) return GIFTI - def to_xml(self, enc='utf-8'): + def to_xml(self, enc='utf-8') -> bytes: """Return XML corresponding to image content""" header = b""" @@ -840,9 +842,12 @@ def to_xml(self, enc='utf-8'): return header + super().to_xml(enc) # Avoid the indirection of going through to_file_map - to_bytes = to_xml + def to_bytes(self, enc='utf-8'): + return self.to_xml(enc=enc) - def to_file_map(self, file_map=None): + to_bytes.__doc__ = SerializableImage.to_bytes.__doc__ + + def to_file_map(self, file_map=None, enc='utf-8'): """Save the current image to the specified file_map Parameters @@ -858,7 +863,7 @@ def to_file_map(self, file_map=None): if file_map is None: file_map = self.file_map with file_map['image'].get_prepare_fileobj('wb') as f: - f.write(self.to_xml()) + f.write(self.to_xml(enc=enc)) @classmethod def from_file_map(klass, file_map, buffer_size=35000000, mmap=True): diff --git a/nibabel/minc1.py b/nibabel/minc1.py index fb183277bc..b9d4bc2074 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -7,8 +7,10 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read MINC1 format images""" +from __future__ import annotations from numbers import Integral +from typing import Type import numpy as np @@ -305,11 +307,11 @@ class Minc1Image(SpatialImage): load. """ - header_class = Minc1Header - _meta_sniff_len = 4 - valid_exts = ('.mnc',) - files_types = (('image', '.mnc'),) - _compressed_suffixes = ('.gz', '.bz2', '.zst') + header_class: Type[MincHeader] = Minc1Header + _meta_sniff_len: int = 4 + valid_exts: tuple[str, ...] = ('.mnc',) + files_types: tuple[tuple[str, str], ...] = (('image', '.mnc'),) + _compressed_suffixes: tuple[str, ...] 
= ('.gz', '.bz2', '.zst') makeable = True rw = False diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 1fffae0c86..cdb567a996 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -155,7 +155,7 @@ class Minc2Image(Minc1Image): def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # Import of h5py might take awhile for MPI-enabled builds # So we are importing it here "on demand" - import h5py + import h5py # type: ignore holder = file_map['image'] if holder.filename is None: diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 7e6bea9009..572957f391 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -127,8 +127,6 @@ class Wrapper: is_multiframe = False b_matrix = None q_vector = None - b_value = None - b_vector = None def __init__(self, dcm_data): """Initialize wrapper diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 61a6da3660..9bb88e844c 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -10,8 +10,11 @@ NIfTI1 format defined at http://nifti.nimh.nih.gov/nifti-1/ """ +from __future__ import annotations + import warnings from io import BytesIO +from typing import Type import numpy as np import numpy.linalg as npl @@ -87,8 +90,8 @@ # datatypes not in analyze format, with codes if have_binary128(): # Only enable 128 bit floats if we really have IEEE binary 128 longdoubles - _float128t = np.longdouble - _complex256t = np.longcomplex + _float128t: Type[np.generic] = np.longdouble + _complex256t: Type[np.generic] = np.longcomplex else: _float128t = np.void _complex256t = np.void @@ -1814,7 +1817,7 @@ class Nifti1PairHeader(Nifti1Header): class Nifti1Pair(analyze.AnalyzeImage): """Class for NIfTI1 format image, header pair""" - header_class = Nifti1PairHeader + header_class: Type[Nifti1Header] = Nifti1PairHeader _meta_sniff_len = header_class.sizeof_hdr rw = True @@ -1848,9 +1851,7 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtyp self._affine2header() # Copy docstring - __init__.__doc__ = ( - analyze.AnalyzeImage.__init__.__doc__ - + """ + __init__.__doc__ = f"""{analyze.AnalyzeImage.__init__.__doc__} Notes ----- @@ -1863,7 +1864,6 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtyp :meth:`set_qform` methods can be used to update the codes after an image has been created - see those methods, and the :ref:`manual ` for more details. """ - ) def update_header(self): """Harmonize header with image data and affine diff --git a/nibabel/openers.py b/nibabel/openers.py index 4a1b911c95..d75839fe1a 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -20,7 +20,7 @@ # is indexed_gzip present and modern? 
try: - import indexed_gzip as igzip + import indexed_gzip as igzip # type: ignore version = igzip.__version__ diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 27ade56ae9..7c594dcb45 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -1338,7 +1338,7 @@ def from_filename( strict_sort=strict_sort, ) - load = from_filename + load = from_filename # type: ignore -load = PARRECImage.load +load = PARRECImage.from_filename diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 068600b4e6..73dfd92ed2 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -70,7 +70,7 @@ def cmp_pkg_version(version_str: str, pkg_version_str: str = __version__) -> int return _cmp(Version(version_str), Version(pkg_version_str)) -def pkg_commit_hash(pkg_path: str = None) -> tuple[str, str]: +def pkg_commit_hash(pkg_path: str | None = None) -> tuple[str, str]: """Get short form of commit hash In this file is a variable called COMMIT_HASH. This contains a substitution @@ -109,7 +109,7 @@ def pkg_commit_hash(pkg_path: str = None) -> tuple[str, str]: cwd=pkg_path, ) if proc.stdout: - return 'repository', proc.stdout.strip() + return 'repository', proc.stdout.decode().strip() return '(none found)', '' diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index 9ee2553c5a..4d9df7df7b 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -7,7 +7,7 @@ without error, and always defines: * have_dicom : True if we can import pydicom or dicom; -* pydicom : pydicom module or dicom module or None of not importable; +* pydicom : pydicom module or dicom module or None if not importable; * read_file : ``read_file`` function if pydicom or dicom module is importable else None; * tag_for_keyword : ``tag_for_keyword`` function if pydicom or dicom module @@ -19,26 +19,25 @@ A deprecated copy is available here for backward compatibility. 
""" +from __future__ import annotations -# Module has (apparently) unused imports; stop flake8 complaining -# flake8: noqa +from typing import Callable from .deprecated import deprecate_with_version +from .optpkg import optional_package -have_dicom = True -pydicom = read_file = tag_for_keyword = Sequence = None +pydicom, have_dicom, _ = optional_package('pydicom') -try: - import pydicom -except ImportError: - have_dicom = False -else: # pydicom module available - # Values not imported by default - import pydicom.values - from pydicom.dicomio import read_file - from pydicom.sequence import Sequence +read_file: Callable | None = None +tag_for_keyword: Callable | None = None +Sequence: type | None = None if have_dicom: + # Values not imported by default + import pydicom.values # type: ignore + from pydicom.dicomio import read_file # noqa:F401 + from pydicom.sequence import Sequence # noqa:F401 + tag_for_keyword = pydicom.datadict.tag_for_keyword diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 794bb750e6..4bd25e986f 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -129,6 +129,9 @@ >>> np.all(img3.get_fdata(dtype=np.float32) == data) True """ +from __future__ import annotations + +from typing import Type import numpy as np @@ -400,7 +403,7 @@ def slice_affine(self, slicer): class SpatialImage(DataobjImage): """Template class for volumetric (3D/4D) images""" - header_class = SpatialHeader + header_class: Type[SpatialHeader] = SpatialHeader ImageSlicer = SpatialFirstSlicer def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 12e3cb658d..a089bedb02 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -274,7 +274,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): contents = matf.read() if len(contents) == 0: return ret - import scipy.io as sio + import scipy.io as sio # type: ignore mats = sio.loadmat(BytesIO(contents)) if 'mat' in mats: # this overrides a 'M', and includes any flip diff --git a/nibabel/testing/np_features.py b/nibabel/testing/np_features.py index c0739a8502..eeb783900a 100644 --- a/nibabel/testing/np_features.py +++ b/nibabel/testing/np_features.py @@ -1,24 +1,16 @@ """Look for changes in numpy behavior over versions """ +from functools import lru_cache import numpy as np -def memmap_after_ufunc(): +@lru_cache(maxsize=None) +def memmap_after_ufunc() -> bool: """Return True if ufuncs on memmap arrays always return memmap arrays This should be True for numpy < 1.12, False otherwise. - - Memoize after first call. We do this to avoid having to call this when - importing nibabel.testing, because we cannot depend on the source file - being present - see gh-571. 
""" - if memmap_after_ufunc.result is not None: - return memmap_after_ufunc.result with open(__file__, 'rb') as fobj: mm_arr = np.memmap(fobj, mode='r', shape=(10,), dtype=np.uint8) - memmap_after_ufunc.result = isinstance(mm_arr + 1, np.memmap) - return memmap_after_ufunc.result - - -memmap_after_ufunc.result = None + return isinstance(mm_arr + 1, np.memmap) diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 5a8eccfa2c..a3be77ffa8 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -16,7 +16,7 @@ from contextlib import chdir as _chdir except ImportError: # PY310 - @contextmanager + @contextmanager # type: ignore def _chdir(path): cwd = os.getcwd() os.chdir(path) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index b339b6bab5..225062b2cb 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utility functions for analyze-like formats""" +from __future__ import annotations import gzip import sys @@ -29,7 +30,7 @@ native_code = sys_is_le and '<' or '>' swapped_code = sys_is_le and '>' or '<' -endian_codes = ( # numpy code, aliases +_endian_codes = ( # numpy code, aliases ('<', 'little', 'l', 'le', 'L', 'LE'), ('>', 'big', 'BIG', 'b', 'be', 'B', 'BE'), (native_code, 'native', 'n', 'N', '=', '|', 'i', 'I'), @@ -41,7 +42,7 @@ default_compresslevel = 1 #: file-like classes known to hold compressed data -COMPRESSED_FILE_LIKES = (gzip.GzipFile, BZ2File, IndexedGzipFile) +COMPRESSED_FILE_LIKES: tuple[type, ...] = (gzip.GzipFile, BZ2File, IndexedGzipFile) # Enable .zst support if pyzstd installed. if HAVE_ZSTD: @@ -220,7 +221,7 @@ def value_set(self, name=None): # Endian code aliases -endian_codes = Recoder(endian_codes) +endian_codes = Recoder(_endian_codes) class DtypeMapper: diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index bf29e0828a..6e236d7356 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -109,11 +109,13 @@ nib.imageglobals.logger = logger """ +from __future__ import annotations + import numpy as np from . 
import imageglobals as imageglobals from .batteryrunners import BatteryRunner -from .volumeutils import endian_codes, native_code, pretty_mapping, swapped_code +from .volumeutils import Recoder, endian_codes, native_code, pretty_mapping, swapped_code class WrapStructError(Exception): @@ -482,7 +484,7 @@ def _get_checks(klass): class LabeledWrapStruct(WrapStruct): """A WrapStruct with some fields having value labels for printing etc""" - _field_recoders = {} # for recoding values for str + _field_recoders: dict[str, Recoder] = {} # for recoding values for str def get_value_label(self, fieldname): """Returns label for coded field From 85f4cb482070cd6ee2b63cb754bb8da000a866e5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 11:40:34 -0500 Subject: [PATCH 177/702] MNT: Add mypy pre-commit hook --- .pre-commit-config.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e99b9570d6..addd5f5634 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -25,3 +25,15 @@ repos: hooks: - id: flake8 exclude: "^(doc|nisext|tools)/" + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v0.991 + hooks: + - id: mypy + # Sync with project.optional-dependencies.typing + additional_dependencies: + - pytest + - types-setuptools + - types-Pillow + - pydicom + # Sync with tool.mypy['exclude'] + exclude: "^(doc|nisext|tools)/|.*/tests/" From 8e62e9983222f05a691df1dfc06d900751a50e70 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 13:21:50 -0500 Subject: [PATCH 178/702] ENH: Support multiline header fields in TCK Treat repeated keys as adding lines to the field Closes gh-957 --- nibabel/streamlines/tck.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index ec8e7dbce7..144c7bef26 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -6,6 +6,7 @@ import os import warnings +from contextlib import suppress import numpy as np @@ -331,6 +332,8 @@ def _read_header(cls, fileobj): f.seek(1, os.SEEK_CUR) # Skip \n found_end = False + key = None + tmp_hdr = {} # Read all key-value pairs contained in the header, stop at EOF for n_line, line in enumerate(f, 1): @@ -343,15 +346,22 @@ def _read_header(cls, fileobj): found_end = True break - if ':' not in line: # Invalid header line + # Set new key if available, otherwise append to last known key + with suppress(ValueError): + key, line = line.split(':', 1) + key = key.strip() + + # Apparent continuation line before any keys are found + if key is None: raise HeaderError(f'Invalid header (line {n_line}): {line}') - key, value = line.split(':', 1) - hdr[key.strip()] = value.strip() + tmp_hdr.setdefault(key, []).append(line.strip()) if not found_end: raise HeaderError('Missing END in the header.') + hdr.update({key: '\n'.join(val) for key, val in tmp_hdr.items()}) + offset_data = f.tell() # Set the file position where it was, in case it was previously open From be2cc874496d3a2af1dba14651c2702592efc676 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 13:40:19 -0500 Subject: [PATCH 179/702] ENH: Remove restriction on writing newlines in header --- nibabel/streamlines/tck.py | 5 ----- nibabel/streamlines/tests/test_tck.py | 4 ---- 2 files changed, 9 deletions(-) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 144c7bef26..43df2f87e0 100644 --- a/nibabel/streamlines/tck.py +++ 
b/nibabel/streamlines/tck.py @@ -267,11 +267,6 @@ def _write_header(fileobj, header): ) out = '\n'.join(lines) - # Check the header is well formatted. - if out.count('\n') > len(lines) - 1: # \n only allowed between lines. - msg = f"Key-value pairs cannot contain '\\n':\n{out}" - raise HeaderError(msg) - if out.count(':') > len(lines): # : only one per line (except the last one which contains END). msg = f"Key-value pairs cannot contain ':':\n{out}" diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index f514d3f3df..eb464042df 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -192,10 +192,6 @@ def test_write_simple_file(self): # TCK file containing not well formatted entries in its header. tck_file = BytesIO() tck = TckFile(tractogram) - tck.header['new_entry'] = 'value\n' # \n not allowed - with pytest.raises(HeaderError): - tck.save(tck_file) - tck.header['new_entry'] = 'val:ue' # : not allowed with pytest.raises(HeaderError): tck.save(tck_file) From 95df42eab6ae1806981213a210e2e79bd568ed81 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 13:40:57 -0500 Subject: [PATCH 180/702] TEST: Test that multiline header items can be parsed --- nibabel/streamlines/tests/test_tck.py | 9 +++++++++ nibabel/tests/data/multiline_header_field.tck | Bin 0 -> 4411 bytes 2 files changed, 9 insertions(+) create mode 100644 nibabel/tests/data/multiline_header_field.tck diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index eb464042df..3df7dd4f2d 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -31,6 +31,7 @@ def setup_module(): # standard.tck contains only streamlines DATA['standard_tck_fname'] = pjoin(data_path, 'standard.tck') DATA['matlab_nan_tck_fname'] = pjoin(data_path, 'matlab_nan.tck') + DATA['multiline_header_fname'] = pjoin(data_path, 'multiline_header_field.tck') DATA['streamlines'] = [ np.arange(1 * 3, dtype='f4').reshape((1, 3)), @@ -87,6 +88,14 @@ def test_load_matlab_nan_file(self): assert len(streamlines) == 1 assert streamlines[0].shape == (108, 3) + def test_load_multiline_header_file(self): + for lazy_load in [False, True]: + tck = TckFile.load(DATA['multiline_header_fname'], lazy_load=lazy_load) + streamlines = list(tck.tractogram.streamlines) + assert len(tck.header['command_history'].splitlines()) == 3 + assert len(streamlines) == 1 + assert streamlines[0].shape == (253, 3) + def test_writeable_data(self): data = DATA['simple_tractogram'] for key in ('simple_tck_fname', 'simple_tck_big_endian_fname'): diff --git a/nibabel/tests/data/multiline_header_field.tck b/nibabel/tests/data/multiline_header_field.tck new file mode 100644 index 0000000000000000000000000000000000000000..42ebedc43a94cb15657a7e52fb6e89196b258f3b GIT binary patch literal 4411 zcmeH{OKaRP5XbkdPcg^#V6VNeB!(UWN$4SML)lX=Lbc_!x_TICoMd}xK3YFo$M$+5 zEumTZfPe+U!^j$q{_`6dk6zsadXaAf%UxqJgJc*fD+CJM)iQ%C?F7ze*O&B{F~SzS zQURUcWq|p1BP~S2ZnT8iEM7un#JwipY?+X)Y-^`v*6vPvNaF;i#^Ur#b3e^(5vHHc zZ$2jJ)O6|a5a!79P)x5rT<{xLH(H=_b}da!mfZi%+ttAr8&yISq%62HVk7@{eEv>^ zZGVU%>mP;>Qp+4+))xVyyg^r%ki?|lvqW1{G0|P4hpG^iJ~01SH~EAMLI zg$XfL7U0#b^nv7mGheF^8p&^$@hm=l&1h8?3ZXCFE?thdPzH)UbX9U#>D%tG)cHGv1tPJIw5v_G^HZM+eQ-#u99>6I5oc`QHTyD= z29N5*T!VFkJfN(3P+zH+lX$f_Ysblo`R#%ntd}wHTKewK3>GX@y!MjXowiFlEoH$q zKBoZB;_$fUWl!WsZ%oadkn z&2~KO=V*_bj0}tnyhH{bDCuM8OT6cp^VpyNlYwJ Date: Tue, 3 Jan 2023 21:08:44 -0500 Subject: [PATCH 181/702] ENH: Fully annotated 
nibabel.filebasedimages --- nibabel/filebasedimages.py | 59 ++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 598a735d23..82398bac18 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -10,6 +10,8 @@ from __future__ import annotations import io +import os +import typing as ty from copy import deepcopy from typing import Type from urllib import request @@ -18,6 +20,10 @@ from .filename_parser import TypesFilenamesError, splitext_addext, types_filenames from .openers import ImageOpener +FileSpec = ty.Union[str, os.PathLike] +FileMap = ty.Mapping[str, FileHolder] +FileSniff = ty.Tuple[bytes, str] + class ImageFileError(Exception): pass @@ -41,10 +47,10 @@ def from_header(klass, header=None): ) @classmethod - def from_fileobj(klass, fileobj): + def from_fileobj(klass, fileobj: io.IOBase): raise NotImplementedError - def write_to(self, fileobj): + def write_to(self, fileobj: io.IOBase): raise NotImplementedError def __eq__(self, other): @@ -53,7 +59,7 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def copy(self): + def copy(self) -> FileBasedHeader: """Copy object to independent representation The copy should not be affected by any changes to the original @@ -155,7 +161,12 @@ class FileBasedImage: makeable: bool = True # Used in test code rw: bool = True # Used in test code - def __init__(self, header=None, extra=None, file_map=None): + def __init__( + self, + header: FileBasedHeader | ty.Mapping | None = None, + extra: ty.Mapping | None = None, + file_map: FileMap | None = None, + ): """Initialize image The image is a combination of (header), with @@ -182,14 +193,14 @@ def __init__(self, header=None, extra=None, file_map=None): self.file_map = file_map @property - def header(self): + def header(self) -> FileBasedHeader: return self._header def __getitem__(self, key): """No slicing or dictionary interface for images""" raise TypeError('Cannot slice image objects.') - def get_filename(self): + def get_filename(self) -> str | None: """Fetch the image filename Parameters @@ -210,7 +221,7 @@ def get_filename(self): characteristic_type = self.files_types[0][0] return self.file_map[characteristic_type].filename - def set_filename(self, filename): + def set_filename(self, filename: str): """Sets the files in the object from a given filename The different image formats may check whether the filename has @@ -228,16 +239,16 @@ def set_filename(self, filename): self.file_map = self.__class__.filespec_to_file_map(filename) @classmethod - def from_filename(klass, filename): + def from_filename(klass, filename: FileSpec): file_map = klass.filespec_to_file_map(filename) return klass.from_file_map(file_map) @classmethod - def from_file_map(klass, file_map): + def from_file_map(klass, file_map: FileMap): raise NotImplementedError @classmethod - def filespec_to_file_map(klass, filespec): + def filespec_to_file_map(klass, filespec: FileSpec): """Make `file_map` for this class from filename `filespec` Class method @@ -271,7 +282,7 @@ def filespec_to_file_map(klass, filespec): file_map[key] = FileHolder(filename=fname) return file_map - def to_filename(self, filename, **kwargs): + def to_filename(self, filename: FileSpec, **kwargs): r"""Write image to files implied by filename string Parameters @@ -290,11 +301,11 @@ def to_filename(self, filename, **kwargs): self.file_map = self.filespec_to_file_map(filename) self.to_file_map(**kwargs) - def 
to_file_map(self, file_map=None, **kwargs): + def to_file_map(self, file_map: FileMap | None = None, **kwargs): raise NotImplementedError @classmethod - def make_file_map(klass, mapping=None): + def make_file_map(klass, mapping: ty.Mapping[str, str | io.IOBase] | None = None): """Class method to make files holder for this image type Parameters @@ -327,7 +338,7 @@ def make_file_map(klass, mapping=None): load = from_filename @classmethod - def instance_to_filename(klass, img, filename): + def instance_to_filename(klass, img: FileBasedImage, filename: FileSpec): """Save `img` in our own format, to name implied by `filename` This is a class method @@ -343,7 +354,7 @@ def instance_to_filename(klass, img, filename): img.to_filename(filename) @classmethod - def from_image(klass, img): + def from_image(klass, img: FileBasedImage): """Class method to create new instance of own class from `img` Parameters @@ -359,7 +370,12 @@ def from_image(klass, img): raise NotImplementedError() @classmethod - def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): + def _sniff_meta_for( + klass, + filename: FileSpec, + sniff_nbytes: int, + sniff: FileSniff | None = None, + ): """Sniff metadata for image represented by `filename` Parameters @@ -404,7 +420,12 @@ def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): return (binaryblock, meta_fname) @classmethod - def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): + def path_maybe_image( + klass, + filename: FileSpec, + sniff: FileSniff | None = None, + sniff_max: int = 1024, + ): """Return True if `filename` may be image matching this class Parameters @@ -547,7 +568,7 @@ def from_bytes(klass, bytestring: bytes): Parameters ---------- - bstring : bytes + bytestring : bytes Byte string containing the on-disk representation of an image """ return klass.from_stream(io.BytesIO(bytestring)) @@ -571,7 +592,7 @@ def to_bytes(self, **kwargs) -> bytes: return bio.getvalue() @classmethod - def from_url(klass, url, timeout=5): + def from_url(klass, url: str | request.Request, timeout: float = 5): """Retrieve and load an image from a URL Class method From 8eee7f592a0ee7f9ba9a161425e402e25bb269ad Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 21:10:12 -0500 Subject: [PATCH 182/702] ENH: Add arrayproxy.ArrayLike protocol --- nibabel/arrayproxy.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 5a2bae02c0..5bc355c6d4 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -25,11 +25,15 @@ See :mod:`nibabel.tests.test_proxy_api` for proxy API conformance checks. """ +from __future__ import annotations + +import typing as ty import warnings from contextlib import contextmanager from threading import RLock import numpy as np +import numpy.typing as npt from . import openers from .fileslice import canonical_slicers, fileslice @@ -53,7 +57,24 @@ KEEP_FILE_OPEN_DEFAULT = False -class ArrayProxy: +class ArrayLike(ty.Protocol): + """Protocol for numpy ndarray-like objects + + This is more stringent than :class:`numpy.typing.ArrayLike`, but guarantees + access to shape, ndim and slicing. + """ + + shape: tuple[int, ...] + ndim: int + + def __array__(self, dtype: npt.DTypeLike | None = None, /) -> npt.NDArray: + ... + + def __getitem__(self, key, /) -> npt.NDArray: + ... 
+ + +class ArrayProxy(ArrayLike): """Class to act as proxy for the array that can be read from a file The array proxy allows us to freeze the passed fileobj and header such that From 05a0b2fc2eae4d99a01f3ad0971b460e7186a632 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 3 Jan 2023 21:10:42 -0500 Subject: [PATCH 183/702] ENH: Fully annotate nibabel.dataobj_images --- nibabel/dataobj_images.py | 52 ++++++++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 11 deletions(-) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 5c8de66674..ca5b4b89c3 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -7,17 +7,31 @@ * returns an array from ``numpy.asanyarray(obj)``; * has an attribute or property ``shape``. """ +from __future__ import annotations + +import typing as ty import numpy as np +import numpy.typing as npt +from .arrayproxy import ArrayLike from .deprecated import deprecate_with_version -from .filebasedimages import FileBasedImage +from .filebasedimages import FileBasedHeader, FileBasedImage, FileMap, FileSpec class DataobjImage(FileBasedImage): """Template class for images that have dataobj data stores""" - def __init__(self, dataobj, header=None, extra=None, file_map=None): + _data_cache: np.ndarray | None + _fdata_cache: np.ndarray | None + + def __init__( + self, + dataobj: ArrayLike, + header: FileBasedHeader | ty.Mapping | None = None, + extra: ty.Mapping | None = None, + file_map: FileMap | None = None, + ): """Initialize dataobj image The datobj image is a combination of (dataobj, header), with optional @@ -40,11 +54,11 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): """ super().__init__(header=header, extra=extra, file_map=file_map) self._dataobj = dataobj - self._fdata_cache = None self._data_cache = None + self._fdata_cache = None @property - def dataobj(self): + def dataobj(self) -> ArrayLike: return self._dataobj @deprecate_with_version( @@ -202,7 +216,11 @@ def get_data(self, caching='fill'): self._data_cache = data return data - def get_fdata(self, caching='fill', dtype=np.float64): + def get_fdata( + self, + caching: ty.Literal['fill', 'unchanged'] = 'fill', + dtype: npt.DTypeLike = np.float64, + ) -> np.ndarray: """Return floating point image data with necessary scaling applied The image ``dataobj`` property can be an array proxy or an array. An @@ -351,7 +369,7 @@ def get_fdata(self, caching='fill', dtype=np.float64): return data @property - def in_memory(self): + def in_memory(self) -> bool: """True when any array data is in memory cache There are separate caches for `get_data` reads and `get_fdata` reads. 
@@ -363,7 +381,7 @@ def in_memory(self): or self._data_cache is not None ) - def uncache(self): + def uncache(self) -> None: """Delete any cached read of data from proxied data Remember there are two types of images: @@ -392,15 +410,21 @@ def uncache(self): self._data_cache = None @property - def shape(self): + def shape(self) -> tuple[int, ...]: return self._dataobj.shape @property - def ndim(self): + def ndim(self) -> int: return self._dataobj.ndim @classmethod - def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): + def from_file_map( + klass, + file_map: FileMap, + *, + mmap: bool | ty.Literal['c', 'r'] = True, + keep_file_open: bool | None = None, + ): """Class method to create image from mapping in ``file_map`` Parameters @@ -433,7 +457,13 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): raise NotImplementedError @classmethod - def from_filename(klass, filename, *, mmap=True, keep_file_open=None): + def from_filename( + klass, + filename: FileSpec, + *, + mmap: bool | ty.Literal['c', 'r'] = True, + keep_file_open: bool | None = None, + ): """Class method to create image from filename `filename` Parameters From 570101bc0b576902a6b556412da287843d43d456 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 13:25:50 -0500 Subject: [PATCH 184/702] FIX: Place numpy.typing behind ty.TYPE_CHECKING guards --- nibabel/arrayproxy.py | 9 ++++++--- nibabel/dataobj_images.py | 4 +++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 5bc355c6d4..7213e65769 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -33,7 +33,6 @@ from threading import RLock import numpy as np -import numpy.typing as npt from . import openers from .fileslice import canonical_slicers, fileslice @@ -57,6 +56,10 @@ KEEP_FILE_OPEN_DEFAULT = False +if ty.TYPE_CHECKING: # pragma: no cover + import numpy.typing as npt + + class ArrayLike(ty.Protocol): """Protocol for numpy ndarray-like objects @@ -68,10 +71,10 @@ class ArrayLike(ty.Protocol): ndim: int def __array__(self, dtype: npt.DTypeLike | None = None, /) -> npt.NDArray: - ... + ... # pragma: no cover def __getitem__(self, key, /) -> npt.NDArray: - ... + ... # pragma: no cover class ArrayProxy(ArrayLike): diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index ca5b4b89c3..4d884be66a 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -12,12 +12,14 @@ import typing as ty import numpy as np -import numpy.typing as npt from .arrayproxy import ArrayLike from .deprecated import deprecate_with_version from .filebasedimages import FileBasedHeader, FileBasedImage, FileMap, FileSpec +if ty.TYPE_CHECKING: # pragma: no cover + import numpy.typing as npt + class DataobjImage(FileBasedImage): """Template class for images that have dataobj data stores""" From 1389ece7bc4e98a0fa2c23d2f254dcc75df320e0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Jan 2023 08:19:45 -0500 Subject: [PATCH 185/702] DOC: Update changelog --- Changelog | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/Changelog b/Changelog index d1cad17aa8..2eec48fa6b 100644 --- a/Changelog +++ b/Changelog @@ -25,6 +25,89 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. +5.0.0 (Monday 9 January 2023) +============================= + +New feature release in the 5.0.x series. 
+ +New features +------------ +* :class:`~nibabel.filebasedimages.SerializableImage` now has + :meth:`~nibabel.filebasedimages.SerializableImage.to_stream()` and + :meth:`~nibabel.filebasedimages.SerializableImage.from_stream()` methods to + read/write streams implementing the :class:`~io.IOBase` interface. A + :meth:`~nibabel.filebasedimages.SerializableImage.from_url()` method + enables loading images from URLs. (pr/1129) (CM, reviewed by MB) +* :class:`~nibabel.streamlines.trk.TrkFile` supports TRKv3, an + undocumented but generally compatible variant of TRKv2. (pr/1125) (CM) + +Enhancements +------------ +* Support multiline header fields in :class:`~nibabel.streamlines.tck.TCKFile` + (pr/1175) (CM, reviewed by Matt Cieslak) +* Make layout order an initialization parameter of + :class:`~nibabel.arrayproxy.ArrayProxy` (pr/1131) (CM, reviewed by MB) +* Initial support for type annotations. (pr/1115, pr/1178) (CM, reviewed by + Zvi Baratz) + +Bug fixes +--------- +* Handle extension/file-format mismatches implemented incompletely in pr/1013 + (pr/1138) (CM, reviewed by Thomas Phil) +* Improve handling of invalid TCK files, which could sometimes cause an + infinite loop (pr/1140) (Anibal Solon, reviewed by CM) +* Clean up ECAT test case that left filehandle open and failed to use class + variables (pr/1155) (Dimitri Papadopoulos, reviewed by CM) + +Maintenance +----------- +* Simplify TCK reading code by assuming files are open in binary mode + (pr/1142) (Anibal Solon, reviewed by MC, CM) +* Code support for tests covering deprecated functionality (pr/1159) (CM) +* Miscellaneous code cleanups (pr/1148, pr/1149, pr/1153, pr/1154, pr/1156) + (Dimitri Papadopoulos, reviewed by CM) +* Update CI to build, test and deploy PyPI artifacts (pr/1134) (CM, reviewed + by MB) +* Transition from ``setup.cfg`` to ``pyproject.toml`` package configuration + (pr/1133) (CM, reviewed by MB) +* Addressed race conditions preventing running tests with pytest-xdist_. + (pr/1157, pr/1158) (CM, reviewed by Christian Haselgrove) +* Apply blue_ and isort_ auto-formatters and provide pre-commit_ configuration + to reduce human burden of style guidelines. (pr/1124, pr/1165, pr/1169) + (CM and Zvi Baratz) +* Manage versioning with setuptools_scm_ (pr/1171) (CM, reviewed by Zvi Baratz) +* Reduce installed package size by excluding very large test file (pr/1176) + (CM, reviewed by Zvi Baratz) + +API changes and deprecations +---------------------------- +* Passing an ``int64`` array to :class:`~nibabel.nifti1.Nifti1Image` without a + header or dtype argument will raise a ``ValueError``. (pr/1173) (CM) +* :class:`nibabel.tmpdirs.TemporaryDirectory` is deprecated in favor of + :class:`tempfile.TemporaryDirectory`. (pr/1172) (CM, reviewed by Zvi + Baratz) +* The ``nisext`` package is deprecated and will be removed in NiBabel 6.0. + (pr/1170) (CM, reviewed by MB) +* Drop support for Python 3.7, Numpy < 1.19 (pr/1177) (CM) +* The following deprecated functions and methods will now raise + ``ExpiredDeprecationError``\s + + * :func:`nibabel.loadsave.read_img_data` + * :meth:`nibabel.dataobj_images.DataobjImage.get_data` + * :func:`nibabel.loadsave.guessed_image_type` + * :func:`nibabel.onetime.setattr_on_read` + * :func:`nibabel.orientations.flip_axis` + +* Modules, classes and functions that expired at 4.0 were fully removed. + ``ExpiredDeprecationError``\s will now be ``AttributeError``\s. + +.. _blue: https://blue.readthedocs.io +.. _isort: https://pycqa.github.io/isort/ +.. _pre-commit: https://pre-commit.com/ +.. 
_pytest-xdist: https://pytest-xdist.readthedocs.io +.. _setuptools_scm: https://github.com/pypa/setuptools_scm + + 4.0.2 (Wednesday 31 August 2022) ================================ From 3361e0deee1a2d4ad7f323282073dd634bfa7831 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Jan 2023 08:21:45 -0500 Subject: [PATCH 186/702] ENH: Stop using deprecated TemporaryDirectory internally --- nibabel/tests/test_data.py | 2 +- nibabel/tests/test_loadsave.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index af7ef66bde..abcb3caaf2 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -6,6 +6,7 @@ import tempfile from os import environ as env from os.path import join as pjoin +from tempfile import TemporaryDirectory import pytest @@ -21,7 +22,6 @@ get_data_path, make_datasource, ) -from ..tmpdirs import TemporaryDirectory from .test_environment import DATA_KEY, USER_KEY, with_environment diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 3b58772b6a..de1d818039 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -5,6 +5,7 @@ import shutil from os.path import dirname from os.path import join as pjoin +from tempfile import TemporaryDirectory import numpy as np @@ -21,7 +22,7 @@ from ..openers import Opener from ..optpkg import optional_package from ..testing import expires -from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory +from ..tmpdirs import InTemporaryDirectory _, have_scipy, _ = optional_package('scipy') _, have_pyzstd, _ = optional_package('pyzstd') From 4c5c64afac119cb96f39d384a921e3d80af405be Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Jan 2023 08:40:05 -0500 Subject: [PATCH 187/702] DOC: Update requirements in doc/source/installation.rst --- doc/source/installation.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/source/installation.rst b/doc/source/installation.rst index 5e6009f7ae..65a35ea333 100644 --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -81,14 +81,15 @@ is for you. Requirements ------------ -.. check these against setup.cfg +.. 
check these against pyproject.toml -* Python_ 3.6 or greater -* NumPy_ 1.14 or greater -* Packaging_ 14.3 or greater +* Python_ 3.8 or greater +* NumPy_ 1.19 or greater +* Packaging_ 17.0 or greater +* Setuptools_ * SciPy_ (optional, for full SPM-ANALYZE support) * h5py_ (optional, for MINC2 support) -* PyDICOM_ 0.9.9 or greater (optional, for DICOM support) +* PyDICOM_ 1.0.0 or greater (optional, for DICOM support) * `Python Imaging Library`_ (optional, for PNG conversion in DICOMFS) * pytest_ (optional, to run the tests) * sphinx_ (optional, to build the documentation) From 83605978931511b7a026b384ced2b990e666d0c5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Jan 2023 08:40:55 -0500 Subject: [PATCH 188/702] MNT: Update zenodo ordering --- .zenodo.json | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index a2c6ccee70..823206c593 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -106,6 +106,14 @@ { "name": "Madison, Cindee" }, + { + "affiliation": "CEA", + "name": "Papadopoulos Orfanos, Dimitri", + "orcid": "0000-0002-1242-8990" + }, + { + "name": "S\u00f3lon, Anibal" + }, { "name": "Moloney, Brendan" }, @@ -117,6 +125,9 @@ "name": "Goncalves, Mathias", "orcid": "0000-0002-7252-7771" }, + { + "name": "Baratz, Zvi" + }, { "affiliation": "Montreal Neurological Institute and Hospital", "name": "Markello, Ross", @@ -127,9 +138,6 @@ "name": "Riddell, Cameron", "orcid": "0000-0001-8950-0375" }, - { - "name": "S\u00f3lon, Anibal" - }, { "name": "Burns, Christopher" }, @@ -160,11 +168,6 @@ { "name": "Subramaniam, Krish" }, - { - "affiliation": "CEA", - "name": "Papadopoulos Orfanos, Dimitri", - "orcid": "0000-0002-1242-8990" - }, { "name": "Van, Andrew" }, @@ -305,9 +308,6 @@ { "name": "Schwartz, Yannick" }, - { - "name": "Baratz, Zvi" - }, { "affiliation": "Hospital for Sick Children", "name": "Darwin, Benjamin C" From 418084757bf69957872489158ef199c3b872f717 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Mon, 9 Jan 2023 16:13:17 +0200 Subject: [PATCH 189/702] Update .zenodo.json --- .zenodo.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index 823206c593..75dea73eed 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -126,7 +126,8 @@ "orcid": "0000-0002-7252-7771" }, { - "name": "Baratz, Zvi" + "name": "Baratz, Zvi", + "orcid": "0000-0001-7159-1387" }, { "affiliation": "Montreal Neurological Institute and Hospital", From 5f37ffa31f36570d4efea836e7ec6dcd6307aad0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Jan 2023 09:33:24 -0500 Subject: [PATCH 190/702] FIX: Version comparison on alert_future_error --- nibabel/deprecated.py | 2 +- nibabel/tests/test_deprecated.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index ab9f31c8cf..eb3252fe7e 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -111,7 +111,7 @@ def alert_future_error( Warnings stacklevel to provide; note that this will be incremented by 1, so provide the stacklevel you would provide directly to warnings.warn() """ - if cmp_pkg_version(version) >= 0: + if cmp_pkg_version(version) > 0: msg = f'{msg} This will error in NiBabel {version}. 
{warning_rec}' warnings.warn(msg.strip(), warning_class, stacklevel=stacklevel + 1) else: diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index 8b9f6c360f..2576eca3d9 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -109,3 +109,11 @@ def test_alert_future_error(): error_rec='Fix this issue by doing XYZ.', error_class=ValueError, ) + with pytest.raises(ValueError): + alert_future_error( + 'Message', + '2.0.0', # Error if we equal the (patched) version + warning_rec='Silence this warning by doing XYZ.', + error_rec='Fix this issue by doing XYZ.', + error_class=ValueError, + ) From 323a88ae3e278d1d0f1bcbad223575a323a607ac Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Jan 2023 09:50:15 -0500 Subject: [PATCH 191/702] TEST: Fix version comparison for alert_future_error in nifti1 tests --- nibabel/tests/test_nifti1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 808d06c15a..15971c21f5 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -775,7 +775,7 @@ def test_int64_warning_or_error(self): for dtype in (np.int64, np.uint64): data = np.arange(24, dtype=dtype).reshape((2, 3, 4)) # Starts as a warning, transitions to error at 5.0 - if cmp_pkg_version('5.0') < 0: + if cmp_pkg_version('5.0') <= 0: cm = pytest.raises(ValueError) else: cm = pytest.warns(FutureWarning) From bb374f10e5cc9b99c9777a5fe7bcf4e8bd233ead Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 13 Jan 2023 10:15:37 -0500 Subject: [PATCH 192/702] FIX: Return to cwd on exception in InTemporaryDirectory --- nibabel/tmpdirs.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index a3be77ffa8..3074fca6f2 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -20,8 +20,10 @@ def _chdir(path): cwd = os.getcwd() os.chdir(path) - yield - os.chdir(cwd) + try: + yield + finally: + os.chdir(cwd) from .deprecated import deprecate_with_version From 6083235a118759fd4865b4c0afb11abbf11d33bc Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 15 Jan 2023 20:55:14 -0500 Subject: [PATCH 193/702] CI: Cache git-archive separately from Python packages --- .github/workflows/stable.yml | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 0c560bcb4d..e3a0d82fae 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -46,11 +46,17 @@ jobs: run: python -m build - run: twine check dist/* - name: Build git archive - run: git archive -v -o dist/nibabel-archive.tgz HEAD - - uses: actions/upload-artifact@v3 + run: mkdir archive && git archive -v -o archive/nibabel-archive.tgz HEAD + - name: Upload sdist and wheel artifacts + uses: actions/upload-artifact@v3 with: name: dist path: dist/ + - name: Upload git archive artifact + uses: actions/upload-artifact@v3 + with: + name: archive + path: archive/ test-package: runs-on: ubuntu-latest @@ -59,10 +65,18 @@ jobs: matrix: package: ['wheel', 'sdist', 'archive'] steps: - - uses: actions/download-artifact@v3 + - name: Download sdist and wheel artifacts + uses: actions/download-artifact@v3 with: name: dist path: dist/ + if: matrix.package != 'archive' + - name: Download git archive artifact + uses: actions/download-artifact@v3 + with: + name: archive + path: archive/ + if: matrix.package == 'archive' - uses: 
actions/setup-python@v4 with: python-version: 3 @@ -77,7 +91,7 @@ jobs: run: pip install dist/nibabel-*.tar.gz if: matrix.package == 'sdist' - name: Install archive - run: pip install dist/nibabel-archive.tgz + run: pip install archive/nibabel-archive.tgz if: matrix.package == 'archive' - run: python -c 'import nibabel; print(nibabel.__version__)' - name: Install test extras From 62daa8465688c278c4014327edae1b64c363f345 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 16 Jan 2023 09:09:32 -0500 Subject: [PATCH 194/702] CI: Reorder if constraints to follow step names --- .github/workflows/stable.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index e3a0d82fae..315534107f 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -66,17 +66,17 @@ jobs: package: ['wheel', 'sdist', 'archive'] steps: - name: Download sdist and wheel artifacts + if: matrix.package != 'archive' uses: actions/download-artifact@v3 with: name: dist path: dist/ - if: matrix.package != 'archive' - name: Download git archive artifact + if: matrix.package == 'archive' uses: actions/download-artifact@v3 with: name: archive path: archive/ - if: matrix.package == 'archive' - uses: actions/setup-python@v4 with: python-version: 3 @@ -85,14 +85,14 @@ jobs: - name: Update pip run: pip install --upgrade pip - name: Install wheel - run: pip install dist/nibabel-*.whl if: matrix.package == 'wheel' + run: pip install dist/nibabel-*.whl - name: Install sdist - run: pip install dist/nibabel-*.tar.gz if: matrix.package == 'sdist' + run: pip install dist/nibabel-*.tar.gz - name: Install archive - run: pip install archive/nibabel-archive.tgz if: matrix.package == 'archive' + run: pip install archive/nibabel-archive.tgz - run: python -c 'import nibabel; print(nibabel.__version__)' - name: Install test extras run: pip install nibabel[test] @@ -179,17 +179,17 @@ jobs: - name: Install NiBabel run: tools/ci/install.sh - name: Run tests - run: tools/ci/check.sh if: ${{ matrix.check != 'skiptests' }} + run: tools/ci/check.sh - name: Submit coverage - run: tools/ci/submit_coverage.sh if: ${{ always() }} + run: tools/ci/submit_coverage.sh - name: Upload pytest test results + if: ${{ always() && matrix.check == 'test' }} uses: actions/upload-artifact@v3 with: name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} path: for_testing/test-results.xml - if: ${{ always() && matrix.check == 'test' }} publish: runs-on: ubuntu-latest From 85de727e905a47d3c6e069fa31f42e8e97d131e8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Jan 2023 10:21:13 -0500 Subject: [PATCH 195/702] FIX: Separate EcatImage _header and _subheader variables and types --- nibabel/ecat.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 8b11e881a7..f1a40dd27c 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -747,12 +747,14 @@ def __getitem__(self, sliceobj): class EcatImage(SpatialImage): """Class returns a list of Ecat images, with one image(hdr/data) per frame""" - _header = EcatHeader - header_class = _header + header_class = EcatHeader + subheader_class = EcatSubHeader valid_exts = ('.v',) - _subheader = EcatSubHeader files_types = (('image', '.v'), ('header', '.v')) + _header: EcatHeader + _subheader: EcatSubHeader + ImageArrayProxy = EcatImageArrayProxy def __init__(self, dataobj, affine, header, subheader, mlist, extra=None, file_map=None): @@ 
-879,14 +881,14 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): hdr_file, img_file = klass._get_fileholders(file_map) # note header and image are in same file hdr_fid = hdr_file.get_prepare_fileobj(mode='rb') - header = klass._header.from_fileobj(hdr_fid) + header = klass.header_class.from_fileobj(hdr_fid) hdr_copy = header.copy() # LOAD MLIST mlist = np.zeros((header['num_frames'], 4), dtype=np.int32) mlist_data = read_mlist(hdr_fid, hdr_copy.endianness) mlist[: len(mlist_data)] = mlist_data # LOAD SUBHEADERS - subheaders = klass._subheader(hdr_copy, mlist, hdr_fid) + subheaders = klass.subheader_class(hdr_copy, mlist, hdr_fid) # LOAD DATA # Class level ImageArrayProxy data = klass.ImageArrayProxy(subheaders) From 0a8701a1862cfe2438bcd78c5543fd6d5a9df721 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Jan 2023 10:57:15 -0500 Subject: [PATCH 196/702] TEST: Drop unittest.TestCase base class; pytest does not need it --- nibabel/testing/__init__.py | 13 ------------- nibabel/tests/test_spatialimages.py | 3 +-- nibabel/tests/test_wrapstruct.py | 3 +-- 3 files changed, 2 insertions(+), 17 deletions(-) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index eb99eabca0..bcd62e470c 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -210,19 +210,6 @@ def assert_arr_dict_equal(dict1, dict2): assert_array_equal(value1, value2) -class BaseTestCase(unittest.TestCase): - """TestCase that does not attempt to run if prefixed with a ``_`` - - This restores the nose-like behavior of skipping so-named test cases - in test runners like pytest. - """ - - def setUp(self): - if self.__class__.__name__.startswith('_'): - raise unittest.SkipTest('Base test case - subclass to run') - super().setUp() - - def expires(version): """Decorator to mark a test as xfail with ExpiredDeprecationError after version""" from packaging.version import Version diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 27305739aa..b4fc7e21b7 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -11,7 +11,6 @@ import warnings from io import BytesIO -from unittest import TestCase import numpy as np import pytest @@ -205,7 +204,7 @@ def __array__(self, dtype='int16'): return np.arange(3, dtype=dtype) -class TestSpatialImage(TestCase): +class TestSpatialImage: # class for testing images image_class = SpatialImage can_save = False diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 66dda18237..70f22894ad 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -33,7 +33,6 @@ from .. 
import imageglobals from ..batteryrunners import Report from ..spatialimages import HeaderDataError -from ..testing import BaseTestCase from ..volumeutils import Recoder, native_code, swapped_code from ..wrapstruct import LabeledWrapStruct, WrapStruct, WrapStructError @@ -101,7 +100,7 @@ def log_chk(hdr, level): return hdrc, message, raiser -class _TestWrapStructBase(BaseTestCase): +class _TestWrapStructBase: """Class implements base tests for binary headers It serves as a base class for other binary header tests From 12db9ec3cb47416b76b8e74a45b9afcf674aa6a8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Jan 2023 11:08:01 -0500 Subject: [PATCH 197/702] TEST: Refactor no_scaling test to parametrize without looping --- nibabel/tests/conftest.py | 18 +++++++ nibabel/tests/test_spm99analyze.py | 86 +++++++++++++++--------------- 2 files changed, 61 insertions(+), 43 deletions(-) create mode 100644 nibabel/tests/conftest.py diff --git a/nibabel/tests/conftest.py b/nibabel/tests/conftest.py new file mode 100644 index 0000000000..3cf54a34c5 --- /dev/null +++ b/nibabel/tests/conftest.py @@ -0,0 +1,18 @@ +import pytest + +from ..spatialimages import supported_np_types + + +# Generate dynamic fixtures +def pytest_generate_tests(metafunc): + if 'supported_dtype' in metafunc.fixturenames: + if metafunc.cls is None or not getattr(metafunc.cls, 'image_class'): + raise pytest.UsageError( + 'Attempting to use supported_dtype fixture outside an image test case' + ) + # xdist needs a consistent ordering, so sort by class name + supported_dtypes = sorted( + supported_np_types(metafunc.cls.image_class.header_class()), + key=lambda x: x.__name__, + ) + metafunc.parametrize('supported_dtype', supported_dtypes) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index 9bc4c928a6..42d4265ed3 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -306,57 +306,57 @@ def test_int_int_scaling(self): img_rt = bytesio_round_trip(img) assert_array_equal(img_rt.get_fdata(), np.clip(arr, 0, 255)) - def test_no_scaling(self): + # NOTE: Need to check complex scaling + @pytest.mark.parametrize('in_dtype', FLOAT_TYPES + IUINT_TYPES) + def test_no_scaling(self, in_dtype, supported_dtype): # Test writing image converting types when not calculating scaling img_class = self.image_class hdr_class = img_class.header_class hdr = hdr_class() - supported_types = supported_np_types(hdr) # Any old non-default slope and intercept slope = 2 inter = 10 if hdr.has_data_intercept else 0 - for in_dtype, out_dtype in itertools.product(FLOAT_TYPES + IUINT_TYPES, supported_types): - # Need to check complex scaling - mn_in, mx_in = _dt_min_max(in_dtype) - arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype) - img = img_class(arr, np.eye(4), hdr) - img.set_data_dtype(out_dtype) - # Setting the scaling means we don't calculate it later - img.header.set_slope_inter(slope, inter) - with np.errstate(invalid='ignore'): - rt_img = bytesio_round_trip(img) - with suppress_warnings(): # invalid mult - back_arr = np.asanyarray(rt_img.dataobj) - exp_back = arr.copy() - # If converting to floating point type, casting is direct. 
-            # Otherwise we will need to do float-(u)int casting at some point
-            if out_dtype in IUINT_TYPES:
-                if in_dtype in FLOAT_TYPES:
-                    # Working precision is (at least) float
-                    exp_back = exp_back.astype(float)
-                    # Float to iu conversion will always round, clip
-                    with np.errstate(invalid='ignore'):
-                        exp_back = np.round(exp_back)
-                    if in_dtype in FLOAT_TYPES:
-                        # Clip to shared range of working precision
-                        exp_back = np.clip(exp_back, *shared_range(float, out_dtype))
-                else:  # iu input and output type
-                    # No scaling, never gets converted to float.
-                    # Does get clipped to range of output type
-                    mn_out, mx_out = _dt_min_max(out_dtype)
-                    if (mn_in, mx_in) != (mn_out, mx_out):
-                        # Use smaller of input, output range to avoid np.clip
-                        # upcasting the array because of large clip limits.
-                        exp_back = np.clip(exp_back, max(mn_in, mn_out), min(mx_in, mx_out))
-            if out_dtype in COMPLEX_TYPES:
-                # always cast to real from complex
-                exp_back = exp_back.astype(out_dtype)
-            else:
-                # Cast to working precision
+
+        mn_in, mx_in = _dt_min_max(in_dtype)
+        arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype)
+        img = img_class(arr, np.eye(4), hdr)
+        img.set_data_dtype(supported_dtype)
+        # Setting the scaling means we don't calculate it later
+        img.header.set_slope_inter(slope, inter)
+        with np.errstate(invalid='ignore'):
+            rt_img = bytesio_round_trip(img)
+        with suppress_warnings():  # invalid mult
+            back_arr = np.asanyarray(rt_img.dataobj)
+        exp_back = arr.copy()
+        # If converting to floating point type, casting is direct.
+        # Otherwise we will need to do float-(u)int casting at some point
+        if supported_dtype in IUINT_TYPES:
+            if in_dtype in FLOAT_TYPES:
+                # Working precision is (at least) float
                 exp_back = exp_back.astype(float)
-            # Allow for small differences in large numbers
-            with suppress_warnings():  # invalid value
-                assert_allclose_safely(back_arr, exp_back * slope + inter)
+                # Float to iu conversion will always round, clip
+                with np.errstate(invalid='ignore'):
+                    exp_back = np.round(exp_back)
+                if in_dtype in FLOAT_TYPES:
+                    # Clip to shared range of working precision
+                    exp_back = np.clip(exp_back, *shared_range(float, supported_dtype))
+            else:  # iu input and output type
+                # No scaling, never gets converted to float.
+                # Does get clipped to range of output type
+                mn_out, mx_out = _dt_min_max(supported_dtype)
+                if (mn_in, mx_in) != (mn_out, mx_out):
+                    # Use smaller of input, output range to avoid np.clip
+                    # upcasting the array because of large clip limits.
+                    exp_back = np.clip(exp_back, max(mn_in, mn_out), min(mx_in, mx_out))
+        if supported_dtype in COMPLEX_TYPES:
+            # always cast to real from complex
+            exp_back = exp_back.astype(supported_dtype)
+        else:
+            # Cast to working precision
+            exp_back = exp_back.astype(float)
+        # Allow for small differences in large numbers
+        with suppress_warnings():  # invalid value
+            assert_allclose_safely(back_arr, exp_back * slope + inter)

     def test_write_scaling(self):
         # Check writes with scaling set

From e96ecf7c377fb8ee4b44eabca380dc529a0d477d Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Sat, 7 Jan 2023 10:16:58 -0500
Subject: [PATCH 198/702] RF: Use np.sctypeDict to source scalar types

np.sctypes does not have a consistent value type, and does not enumerate
all scalar types of a given kind.
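
For illustration, a minimal sketch of the enumeration this commit switches
to (mirroring the test helper in the diff below; the variable names here
are illustrative only, not part of the patch):

    import numpy as np

    # np.sctypeDict maps many alias names to the same scalar types, so
    # deduplicate with a set before grouping.
    sctypes = {}
    for sctype in set(np.sctypeDict.values()):
        # Bucket by dtype kind: 'f' (float), 'c' (complex),
        # 'i' (signed int), 'u' (unsigned int), etc.
        sctypes.setdefault(np.dtype(sctype).kind, []).append(sctype)

    # Sorting gives a deterministic order, e.g. for test parametrization:
    float_types = sorted(sctypes['f'], key=lambda t: t.__name__)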
--- nibabel/spatialimages.py | 22 ++++++++++------------ nibabel/tests/test_analyze.py | 14 +++++++------- nibabel/tests/test_nifti1.py | 2 +- nibabel/tests/test_spm99analyze.py | 16 ++++++++++++---- 4 files changed, 30 insertions(+), 24 deletions(-) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 4bd25e986f..af80c25881 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -284,19 +284,17 @@ def supported_np_types(obj): set of numpy types that `obj` supports """ dt = obj.get_data_dtype() - supported = [] - for name, np_types in np.sctypes.items(): - for np_type in np_types: - try: - obj.set_data_dtype(np_type) - except HeaderDataError: - continue - # Did set work? - if np.dtype(obj.get_data_dtype()) == np.dtype(np_type): - supported.append(np_type) - # Reset original header dtype + supported = set() + for np_type in set(np.sctypeDict.values()): + try: + obj.set_data_dtype(np_type) + except HeaderDataError: + continue + # Did set work? + if np.dtype(obj.get_data_dtype()) == np.dtype(np_type): + supported.add(np_type) obj.set_data_dtype(dt) - return set(supported) + return supported class ImageDataError(Exception): diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 7584d550f6..b4a3cd297b 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -49,12 +49,12 @@ PIXDIM0_MSG = 'pixdim[1,2,3] should be non-zero; setting 0 dims to 1' -def add_intp(supported_np_types): - # Add intp, uintp to supported types as necessary - supported_dtypes = [np.dtype(t) for t in supported_np_types] - for np_type in (np.intp, np.uintp): - if np.dtype(np_type) in supported_dtypes: - supported_np_types.add(np_type) +def add_duplicate_types(supported_np_types): + # Update supported numpy types with named scalar types that map to the same set of dtypes + dtypes = {np.dtype(t) for t in supported_np_types} + supported_np_types.update( + scalar for scalar in set(np.sctypeDict.values()) if np.dtype(scalar) in dtypes + ) class TestAnalyzeHeader(tws._TestLabeledWrapStruct): @@ -62,7 +62,7 @@ class TestAnalyzeHeader(tws._TestLabeledWrapStruct): example_file = header_file sizeof_hdr = AnalyzeHeader.sizeof_hdr supported_np_types = {np.uint8, np.int16, np.int32, np.float32, np.float64, np.complex64} - add_intp(supported_np_types) + add_duplicate_types(supported_np_types) def test_supported_types(self): hdr = self.header_class() diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 15971c21f5..7b7f44fe0b 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -80,7 +80,7 @@ class TestNifti1PairHeader(tana.TestAnalyzeHeader, tspm.HeaderScalingMixin): ) if have_binary128(): supported_np_types = supported_np_types.union((np.longdouble, np.longcomplex)) - tana.add_intp(supported_np_types) + tana.add_duplicate_types(supported_np_types) def test_empty(self): tana.TestAnalyzeHeader.test_empty(self) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index 42d4265ed3..9f1dc63b4d 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -35,10 +35,18 @@ from ..volumeutils import _dt_min_max, apply_read_scaling from . 
import test_analyze -FLOAT_TYPES = np.sctypes['float'] -COMPLEX_TYPES = np.sctypes['complex'] -INT_TYPES = np.sctypes['int'] -UINT_TYPES = np.sctypes['uint'] +# np.sctypes values are lists of types with unique sizes +# For testing, we want all concrete classes of a type +# Key on kind, rather than abstract base classes, since timedelta64 is a signedinteger +sctypes = {} +for sctype in set(np.sctypeDict.values()): + sctypes.setdefault(np.dtype(sctype).kind, []).append(sctype) + +# Sort types to ensure that xdist doesn't complain about test order when we parametrize +FLOAT_TYPES = sorted(sctypes['f'], key=lambda x: x.__name__) +COMPLEX_TYPES = sorted(sctypes['c'], key=lambda x: x.__name__) +INT_TYPES = sorted(sctypes['i'], key=lambda x: x.__name__) +UINT_TYPES = sorted(sctypes['u'], key=lambda x: x.__name__) CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES IUINT_TYPES = INT_TYPES + UINT_TYPES NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES From 3686e03690b64b1246d8918c9a31e062fc35e13c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Jan 2023 21:44:43 -0500 Subject: [PATCH 199/702] RF: Cache supported_np_types by class --- nibabel/spatialimages.py | 43 +++++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index af80c25881..884eed7074 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -143,6 +143,11 @@ from .viewers import OrthoSlicer3D from .volumeutils import shape_zoom_affine +try: + from functools import cache +except ImportError: # PY38 + from functools import lru_cache as cache + class HeaderDataError(Exception): """Class to indicate error in getting or setting header data""" @@ -268,22 +273,29 @@ def data_from_fileobj(self, fileobj): return np.ndarray(shape, dtype, data_bytes, order=self.data_layout) -def supported_np_types(obj): - """Numpy data types that instance `obj` supports +@cache +def _supported_np_types(klass): + """Numpy data types that instances of ``klass`` support Parameters ---------- - obj : object - Object implementing `get_data_dtype` and `set_data_dtype`. The object + klass : class + Class implementing `get_data_dtype` and `set_data_dtype` methods. The object should raise ``HeaderDataError`` for setting unsupported dtypes. The object will likely be a header or a :class:`SpatialImage` Returns ------- np_types : set - set of numpy types that `obj` supports + set of numpy types that ``klass`` instances support """ - dt = obj.get_data_dtype() + try: + obj = klass() + except TypeError as e: + if hasattr(klass, 'header_class'): + obj = klass.header_class() + else: + raise e supported = set() for np_type in set(np.sctypeDict.values()): try: @@ -293,10 +305,27 @@ def supported_np_types(obj): # Did set work? if np.dtype(obj.get_data_dtype()) == np.dtype(np_type): supported.add(np_type) - obj.set_data_dtype(dt) return supported +def supported_np_types(obj): + """Numpy data types that instance `obj` supports + + Parameters + ---------- + obj : object + Object implementing `get_data_dtype` and `set_data_dtype`. The object + should raise ``HeaderDataError`` for setting unsupported dtypes. 
The + object will likely be a header or a :class:`SpatialImage` + + Returns + ------- + np_types : set + set of numpy types that `obj` supports + """ + return _supported_np_types(obj.__class__) + + class ImageDataError(Exception): pass From b74878315082673f540003d2d9e7bb9e39643037 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 16:40:20 -0500 Subject: [PATCH 200/702] TYP: Align arrayproxy.ArrayLike to satisfy np.ndarray --- nibabel/arrayproxy.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 7213e65769..12a0a7caf3 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -59,6 +59,9 @@ if ty.TYPE_CHECKING: # pragma: no cover import numpy.typing as npt + # Taken from numpy/__init__.pyi + _DType = ty.TypeVar('_DType', bound=np.dtype[ty.Any]) + class ArrayLike(ty.Protocol): """Protocol for numpy ndarray-like objects @@ -68,9 +71,19 @@ class ArrayLike(ty.Protocol): """ shape: tuple[int, ...] - ndim: int - def __array__(self, dtype: npt.DTypeLike | None = None, /) -> npt.NDArray: + @property + def ndim(self) -> int: + ... # pragma: no cover + + # If no dtype is passed, any dtype might be returned, depending on the array-like + @ty.overload + def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: + ... # pragma: no cover + + # Any dtype might be passed, and *that* dtype must be returned + @ty.overload + def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... # pragma: no cover def __getitem__(self, key, /) -> npt.NDArray: From 2e1814cdcb3863716cf274156a1c7a6451f16896 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 17:04:17 -0500 Subject: [PATCH 201/702] TYP: Use type variables to annotate filebasedimage classes --- nibabel/filebasedimages.py | 72 +++++++++++++++++++++----------------- 1 file changed, 40 insertions(+), 32 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 82398bac18..556d8b75e5 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -24,6 +24,11 @@ FileMap = ty.Mapping[str, FileHolder] FileSniff = ty.Tuple[bytes, str] +ImgT = ty.TypeVar('ImgT', bound='FileBasedImage') +HdrT = ty.TypeVar('HdrT', bound='FileBasedHeader') + +StreamImgT = ty.TypeVar('StreamImgT', bound='SerializableImage') + class ImageFileError(Exception): pass @@ -33,7 +38,7 @@ class FileBasedHeader: """Template class to implement header protocol""" @classmethod - def from_header(klass, header=None): + def from_header(klass: type[HdrT], header: FileBasedHeader | ty.Mapping | None = None) -> HdrT: if header is None: return klass() # I can't do isinstance here because it is not necessarily true @@ -47,19 +52,19 @@ def from_header(klass, header=None): ) @classmethod - def from_fileobj(klass, fileobj: io.IOBase): - raise NotImplementedError + def from_fileobj(klass: type[HdrT], fileobj: io.IOBase) -> HdrT: + raise NotImplementedError # pragma: no cover - def write_to(self, fileobj: io.IOBase): - raise NotImplementedError + def write_to(self, fileobj: io.IOBase) -> None: + raise NotImplementedError # pragma: no cover - def __eq__(self, other): - raise NotImplementedError + def __eq__(self, other: object) -> bool: + raise NotImplementedError # pragma: no cover - def __ne__(self, other): + def __ne__(self, other: object) -> bool: return not self == other - def copy(self) -> FileBasedHeader: + def copy(self: HdrT) -> HdrT: """Copy object to independent representation The copy should 
not be affected by any changes to the original @@ -153,6 +158,7 @@ class FileBasedImage: """ header_class: Type[FileBasedHeader] = FileBasedHeader + _header: FileBasedHeader _meta_sniff_len: int = 0 files_types: tuple[tuple[str, str | None], ...] = (('image', None),) valid_exts: tuple[str, ...] = () @@ -186,7 +192,7 @@ def __init__( self._header = self.header_class.from_header(header) if extra is None: extra = {} - self.extra = extra + self.extra = dict(extra) if file_map is None: file_map = self.__class__.make_file_map() @@ -196,7 +202,7 @@ def __init__( def header(self) -> FileBasedHeader: return self._header - def __getitem__(self, key): + def __getitem__(self, key) -> None: """No slicing or dictionary interface for images""" raise TypeError('Cannot slice image objects.') @@ -221,7 +227,7 @@ def get_filename(self) -> str | None: characteristic_type = self.files_types[0][0] return self.file_map[characteristic_type].filename - def set_filename(self, filename: str): + def set_filename(self, filename: str) -> None: """Sets the files in the object from a given filename The different image formats may check whether the filename has @@ -239,16 +245,16 @@ def set_filename(self, filename: str): self.file_map = self.__class__.filespec_to_file_map(filename) @classmethod - def from_filename(klass, filename: FileSpec): + def from_filename(klass: type[ImgT], filename: FileSpec) -> ImgT: file_map = klass.filespec_to_file_map(filename) return klass.from_file_map(file_map) @classmethod - def from_file_map(klass, file_map: FileMap): - raise NotImplementedError + def from_file_map(klass: type[ImgT], file_map: FileMap) -> ImgT: + raise NotImplementedError # pragma: no cover @classmethod - def filespec_to_file_map(klass, filespec: FileSpec): + def filespec_to_file_map(klass, filespec: FileSpec) -> FileMap: """Make `file_map` for this class from filename `filespec` Class method @@ -282,7 +288,7 @@ def filespec_to_file_map(klass, filespec: FileSpec): file_map[key] = FileHolder(filename=fname) return file_map - def to_filename(self, filename: FileSpec, **kwargs): + def to_filename(self, filename: FileSpec, **kwargs) -> None: r"""Write image to files implied by filename string Parameters @@ -301,11 +307,11 @@ def to_filename(self, filename: FileSpec, **kwargs): self.file_map = self.filespec_to_file_map(filename) self.to_file_map(**kwargs) - def to_file_map(self, file_map: FileMap | None = None, **kwargs): - raise NotImplementedError + def to_file_map(self, file_map: FileMap | None = None, **kwargs) -> None: + raise NotImplementedError # pragma: no cover @classmethod - def make_file_map(klass, mapping: ty.Mapping[str, str | io.IOBase] | None = None): + def make_file_map(klass, mapping: ty.Mapping[str, str | io.IOBase] | None = None) -> FileMap: """Class method to make files holder for this image type Parameters @@ -338,7 +344,7 @@ def make_file_map(klass, mapping: ty.Mapping[str, str | io.IOBase] | None = None load = from_filename @classmethod - def instance_to_filename(klass, img: FileBasedImage, filename: FileSpec): + def instance_to_filename(klass, img: FileBasedImage, filename: FileSpec) -> None: """Save `img` in our own format, to name implied by `filename` This is a class method @@ -354,20 +360,20 @@ def instance_to_filename(klass, img: FileBasedImage, filename: FileSpec): img.to_filename(filename) @classmethod - def from_image(klass, img: FileBasedImage): + def from_image(klass: type[ImgT], img: FileBasedImage) -> ImgT: """Class method to create new instance of own class from `img` Parameters ---------- - 
img : ``spatialimage`` instance + img : ``FileBasedImage`` instance In fact, an object with the API of ``FileBasedImage``. Returns ------- - cimg : ``spatialimage`` instance + img : ``FileBasedImage`` instance Image, of our own class """ - raise NotImplementedError() + raise NotImplementedError # pragma: no cover @classmethod def _sniff_meta_for( @@ -375,7 +381,7 @@ def _sniff_meta_for( filename: FileSpec, sniff_nbytes: int, sniff: FileSniff | None = None, - ): + ) -> FileSniff | None: """Sniff metadata for image represented by `filename` Parameters @@ -425,7 +431,7 @@ def path_maybe_image( filename: FileSpec, sniff: FileSniff | None = None, sniff_max: int = 1024, - ): + ) -> tuple[bool, FileSniff | None]: """Return True if `filename` may be image matching this class Parameters @@ -527,14 +533,14 @@ class SerializableImage(FileBasedImage): """ @classmethod - def _filemap_from_iobase(klass, io_obj: io.IOBase): + def _filemap_from_iobase(klass, io_obj: io.IOBase) -> FileMap: """For single-file image types, make a file map with the correct key""" if len(klass.files_types) > 1: raise NotImplementedError('(de)serialization is undefined for multi-file images') return klass.make_file_map({klass.files_types[0][0]: io_obj}) @classmethod - def from_stream(klass, io_obj: io.IOBase): + def from_stream(klass: type[StreamImgT], io_obj: io.IOBase) -> StreamImgT: """Load image from readable IO stream Convert to BytesIO to enable seeking, if input stream is not seekable @@ -548,7 +554,7 @@ def from_stream(klass, io_obj: io.IOBase): io_obj = io.BytesIO(io_obj.read()) return klass.from_file_map(klass._filemap_from_iobase(io_obj)) - def to_stream(self, io_obj: io.IOBase, **kwargs): + def to_stream(self, io_obj: io.IOBase, **kwargs) -> None: r"""Save image to writable IO stream Parameters @@ -561,7 +567,7 @@ def to_stream(self, io_obj: io.IOBase, **kwargs): self.to_file_map(self._filemap_from_iobase(io_obj), **kwargs) @classmethod - def from_bytes(klass, bytestring: bytes): + def from_bytes(klass: type[StreamImgT], bytestring: bytes) -> StreamImgT: """Construct image from a byte string Class method @@ -592,7 +598,9 @@ def to_bytes(self, **kwargs) -> bytes: return bio.getvalue() @classmethod - def from_url(klass, url: str | request.Request, timeout: float = 5): + def from_url( + klass: type[StreamImgT], url: str | request.Request, timeout: float = 5 + ) -> StreamImgT: """Retrieve and load an image from a URL Class method From d61ea0780892e42a844113c4d3d25c04367a434b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 20:57:10 -0500 Subject: [PATCH 202/702] TYP: Annotate DataobjImage classmethods, clarify get_fdata() return type --- nibabel/analyze.py | 2 +- nibabel/brikhead.py | 2 +- nibabel/dataobj_images.py | 14 ++++++++------ nibabel/freesurfer/mghformat.py | 2 +- nibabel/minc1.py | 2 +- nibabel/minc2.py | 2 +- nibabel/spm2analyze.py | 2 +- nibabel/spm99analyze.py | 2 +- 8 files changed, 15 insertions(+), 13 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index fc44693bc6..d738934fff 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -1064,5 +1064,5 @@ def to_file_map(self, file_map=None, dtype=None): hdr['scl_inter'] = inter -load = AnalyzeImage.load +load = AnalyzeImage.from_filename save = AnalyzeImage.instance_to_filename diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 54b6d021f3..f375b541dc 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -564,4 +564,4 @@ def filespec_to_file_map(klass, filespec): return file_map -load = 
AFNIImage.load +load = AFNIImage.from_filename diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 4d884be66a..f23daf5d8d 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -20,12 +20,14 @@ if ty.TYPE_CHECKING: # pragma: no cover import numpy.typing as npt +ArrayImgT = ty.TypeVar('ArrayImgT', bound='DataobjImage') + class DataobjImage(FileBasedImage): """Template class for images that have dataobj data stores""" _data_cache: np.ndarray | None - _fdata_cache: np.ndarray | None + _fdata_cache: np.ndarray[ty.Any, np.dtype[np.floating]] | None def __init__( self, @@ -222,7 +224,7 @@ def get_fdata( self, caching: ty.Literal['fill', 'unchanged'] = 'fill', dtype: npt.DTypeLike = np.float64, - ) -> np.ndarray: + ) -> np.ndarray[ty.Any, np.dtype[np.floating]]: """Return floating point image data with necessary scaling applied The image ``dataobj`` property can be an array proxy or an array. An @@ -421,12 +423,12 @@ def ndim(self) -> int: @classmethod def from_file_map( - klass, + klass: type[ArrayImgT], file_map: FileMap, *, mmap: bool | ty.Literal['c', 'r'] = True, keep_file_open: bool | None = None, - ): + ) -> ArrayImgT: """Class method to create image from mapping in ``file_map`` Parameters @@ -460,12 +462,12 @@ def from_file_map( @classmethod def from_filename( - klass, + klass: type[ArrayImgT], filename: FileSpec, *, mmap: bool | ty.Literal['c', 'r'] = True, keep_file_open: bool | None = None, - ): + ) -> ArrayImgT: """Class method to create image from filename `filename` Parameters diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 6b97056524..693025efbe 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -589,5 +589,5 @@ def _affine2header(self): hdr['Pxyz_c'] = c_ras -load = MGHImage.load +load = MGHImage.from_filename save = MGHImage.instance_to_filename diff --git a/nibabel/minc1.py b/nibabel/minc1.py index b9d4bc2074..ebc167b0ee 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -334,4 +334,4 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): return klass(data, affine, header, extra=None, file_map=file_map) -load = Minc1Image.load +load = Minc1Image.from_filename diff --git a/nibabel/minc2.py b/nibabel/minc2.py index cdb567a996..cc0cb5e440 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -172,4 +172,4 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): return klass(data, affine, header, extra=None, file_map=file_map) -load = Minc2Image.load +load = Minc2Image.from_filename diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index 67389403b9..b326e7eac0 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -130,5 +130,5 @@ class Spm2AnalyzeImage(spm99.Spm99AnalyzeImage): header_class = Spm2AnalyzeHeader -load = Spm2AnalyzeImage.load +load = Spm2AnalyzeImage.from_filename save = Spm2AnalyzeImage.instance_to_filename diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index a089bedb02..9c2aa15ed0 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -331,5 +331,5 @@ def to_file_map(self, file_map=None, dtype=None): sio.savemat(mfobj, {'M': M, 'mat': mat}, format='4') -load = Spm99AnalyzeImage.load +load = Spm99AnalyzeImage.from_filename save = Spm99AnalyzeImage.instance_to_filename From 7d263bd655997f6f01ce64b4de7760aedbb989e7 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 20:58:29 -0500 Subject: [PATCH 203/702] MISC: Import ImageFileError from 
original module --- nibabel/nifti1.py | 4 ++-- nibabel/nifti2.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 9bb88e844c..a480afe49a 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -25,10 +25,10 @@ from .batteryrunners import Report from .casting import have_binary128 from .deprecated import alert_future_error -from .filebasedimages import SerializableImage +from .filebasedimages import ImageFileError, SerializableImage from .optpkg import optional_package from .quaternions import fillpositive, mat2quat, quat2mat -from .spatialimages import HeaderDataError, ImageFileError +from .spatialimages import HeaderDataError from .spm99analyze import SpmAnalyzeHeader from .volumeutils import Recoder, endian_codes, make_dt_codes diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index cb138962cc..9c898b47ba 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -17,8 +17,9 @@ from .analyze import AnalyzeHeader from .batteryrunners import Report +from .filebasedimages import ImageFileError from .nifti1 import Nifti1Header, Nifti1Image, Nifti1Pair -from .spatialimages import HeaderDataError, ImageFileError +from .spatialimages import HeaderDataError r""" Header struct from : https://www.nitrc.org/forum/message.php?msg_id=3738 From f475901fe49d6561f6ba3cefafe71ee29e89591e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Jan 2023 21:23:19 -0500 Subject: [PATCH 204/702] TYP: Annotate SpatialImage and SpatialHeader --- nibabel/spatialimages.py | 183 ++++++++++++++++++++++++--------------- 1 file changed, 115 insertions(+), 68 deletions(-) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 884eed7074..d437cf817a 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -131,13 +131,15 @@ """ from __future__ import annotations -from typing import Type +import io +import typing as ty +from typing import Literal, Sequence import numpy as np +from .arrayproxy import ArrayLike from .dataobj_images import DataobjImage -from .filebasedimages import ImageFileError # noqa -from .filebasedimages import FileBasedHeader +from .filebasedimages import FileBasedHeader, FileBasedImage, FileMap from .fileslice import canonical_slicers from .orientations import apply_orientation, inv_ornt_aff from .viewers import OrthoSlicer3D @@ -148,6 +150,32 @@ except ImportError: # PY38 from functools import lru_cache as cache +if ty.TYPE_CHECKING: # pragma: no cover + import numpy.typing as npt + +SpatialImgT = ty.TypeVar('SpatialImgT', bound='SpatialImage') +SpatialHdrT = ty.TypeVar('SpatialHdrT', bound='SpatialHeader') + + +class HasDtype(ty.Protocol): + def get_data_dtype(self) -> np.dtype: + ... # pragma: no cover + + def set_data_dtype(self, dtype: npt.DTypeLike) -> None: + ... # pragma: no cover + + +@ty.runtime_checkable +class SpatialProtocol(ty.Protocol): + def get_data_dtype(self) -> np.dtype: + ... # pragma: no cover + + def get_data_shape(self) -> ty.Tuple[int, ...]: + ... # pragma: no cover + + def get_zooms(self) -> ty.Tuple[float, ...]: + ... 
# pragma: no cover + class HeaderDataError(Exception): """Class to indicate error in getting or setting header data""" @@ -157,13 +185,22 @@ class HeaderTypeError(Exception): """Class to indicate error in parameters into header functions""" -class SpatialHeader(FileBasedHeader): +class SpatialHeader(FileBasedHeader, SpatialProtocol): """Template class to implement header protocol""" - default_x_flip = True - data_layout = 'F' + default_x_flip: bool = True + data_layout: Literal['F', 'C'] = 'F' - def __init__(self, data_dtype=np.float32, shape=(0,), zooms=None): + _dtype: np.dtype + _shape: tuple[int, ...] + _zooms: tuple[float, ...] + + def __init__( + self, + data_dtype: npt.DTypeLike = np.float32, + shape: Sequence[int] = (0,), + zooms: Sequence[float] | None = None, + ): self.set_data_dtype(data_dtype) self._zooms = () self.set_data_shape(shape) @@ -171,7 +208,10 @@ def __init__(self, data_dtype=np.float32, shape=(0,), zooms=None): self.set_zooms(zooms) @classmethod - def from_header(klass, header=None): + def from_header( + klass: type[SpatialHdrT], + header: SpatialProtocol | FileBasedHeader | ty.Mapping | None = None, + ) -> SpatialHdrT: if header is None: return klass() # I can't do isinstance here because it is not necessarily true @@ -180,26 +220,20 @@ def from_header(klass, header=None): # different field names if type(header) == klass: return header.copy() - return klass(header.get_data_dtype(), header.get_data_shape(), header.get_zooms()) - - @classmethod - def from_fileobj(klass, fileobj): - raise NotImplementedError - - def write_to(self, fileobj): - raise NotImplementedError - - def __eq__(self, other): - return (self.get_data_dtype(), self.get_data_shape(), self.get_zooms()) == ( - other.get_data_dtype(), - other.get_data_shape(), - other.get_zooms(), - ) - - def __ne__(self, other): - return not self == other + if isinstance(header, SpatialProtocol): + return klass(header.get_data_dtype(), header.get_data_shape(), header.get_zooms()) + return super().from_header(header) + + def __eq__(self, other: object) -> bool: + if isinstance(other, SpatialHeader): + return (self.get_data_dtype(), self.get_data_shape(), self.get_zooms()) == ( + other.get_data_dtype(), + other.get_data_shape(), + other.get_zooms(), + ) + return NotImplemented - def copy(self): + def copy(self: SpatialHdrT) -> SpatialHdrT: """Copy object to independent representation The copy should not be affected by any changes to the original @@ -207,47 +241,47 @@ def copy(self): """ return self.__class__(self._dtype, self._shape, self._zooms) - def get_data_dtype(self): + def get_data_dtype(self) -> np.dtype: return self._dtype - def set_data_dtype(self, dtype): + def set_data_dtype(self, dtype: npt.DTypeLike) -> None: self._dtype = np.dtype(dtype) - def get_data_shape(self): + def get_data_shape(self) -> tuple[int, ...]: return self._shape - def set_data_shape(self, shape): + def set_data_shape(self, shape: Sequence[int]) -> None: ndim = len(shape) if ndim == 0: self._shape = (0,) self._zooms = (1.0,) return - self._shape = tuple([int(s) for s in shape]) + self._shape = tuple(int(s) for s in shape) # set any unset zooms to 1.0 nzs = min(len(self._zooms), ndim) self._zooms = self._zooms[:nzs] + (1.0,) * (ndim - nzs) - def get_zooms(self): + def get_zooms(self) -> tuple[float, ...]: return self._zooms - def set_zooms(self, zooms): - zooms = tuple([float(z) for z in zooms]) + def set_zooms(self, zooms: Sequence[float]) -> None: + zooms = tuple(float(z) for z in zooms) shape = self.get_data_shape() ndim = len(shape) if 
len(zooms) != ndim: raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) - if len([z for z in zooms if z < 0]): + if any(z < 0 for z in zooms): raise HeaderDataError('zooms must be positive') self._zooms = zooms - def get_base_affine(self): + def get_base_affine(self) -> np.ndarray: shape = self.get_data_shape() zooms = self.get_zooms() return shape_zoom_affine(shape, zooms, self.default_x_flip) get_best_affine = get_base_affine - def data_to_fileobj(self, data, fileobj, rescale=True): + def data_to_fileobj(self, data: npt.ArrayLike, fileobj: io.IOBase, rescale: bool = True): """Write array data `data` as binary to `fileobj` Parameters @@ -264,7 +298,7 @@ def data_to_fileobj(self, data, fileobj, rescale=True): dtype = self.get_data_dtype() fileobj.write(data.astype(dtype).tobytes(order=self.data_layout)) - def data_from_fileobj(self, fileobj): + def data_from_fileobj(self, fileobj: io.IOBase) -> np.ndarray: """Read binary image data from `fileobj`""" dtype = self.get_data_dtype() shape = self.get_data_shape() @@ -274,7 +308,7 @@ def data_from_fileobj(self, fileobj): @cache -def _supported_np_types(klass): +def _supported_np_types(klass: type[HasDtype]) -> set[type[np.generic]]: """Numpy data types that instances of ``klass`` support Parameters @@ -308,7 +342,7 @@ def _supported_np_types(klass): return supported -def supported_np_types(obj): +def supported_np_types(obj: HasDtype) -> set[type[np.generic]]: """Numpy data types that instance `obj` supports Parameters @@ -330,13 +364,15 @@ class ImageDataError(Exception): pass -class SpatialFirstSlicer: +class SpatialFirstSlicer(ty.Generic[SpatialImgT]): """Slicing interface that returns a new image with an updated affine Checks that an image's first three axes are spatial """ - def __init__(self, img): + img: SpatialImgT + + def __init__(self, img: SpatialImgT): # Local import to avoid circular import on module load from .imageclasses import spatial_axes_first @@ -346,7 +382,7 @@ def __init__(self, img): ) self.img = img - def __getitem__(self, slicer): + def __getitem__(self, slicer: object) -> SpatialImgT: try: slicer = self.check_slicing(slicer) except ValueError as err: @@ -359,7 +395,7 @@ def __getitem__(self, slicer): affine = self.slice_affine(slicer) return self.img.__class__(dataobj.copy(), affine, self.img.header) - def check_slicing(self, slicer, return_spatial=False): + def check_slicing(self, slicer: object, return_spatial: bool = False) -> tuple[slice, ...]: """Canonicalize slicers and check for scalar indices in spatial dims Parameters @@ -376,11 +412,11 @@ def check_slicing(self, slicer, return_spatial=False): Validated slicer object that will slice image's `dataobj` without collapsing spatial dimensions """ - slicer = canonical_slicers(slicer, self.img.shape) + canonical = canonical_slicers(slicer, self.img.shape) # We can get away with this because we've checked the image's # first three axes are spatial. # More general slicers will need to be smarter, here. - spatial_slices = slicer[:3] + spatial_slices = canonical[:3] for subslicer in spatial_slices: if subslicer is None: raise IndexError('New axis not permitted in spatial dimensions') @@ -388,9 +424,9 @@ def check_slicing(self, slicer, return_spatial=False): raise IndexError( 'Scalar indices disallowed in spatial dimensions; Use `[x]` or `x:x+1`.' 
) - return spatial_slices if return_spatial else slicer + return spatial_slices if return_spatial else canonical - def slice_affine(self, slicer): + def slice_affine(self, slicer: tuple[slice, ...]) -> np.ndarray: """Retrieve affine for current image, if sliced by a given index Applies scaling if down-sampling is applied, and adjusts the intercept @@ -430,10 +466,19 @@ def slice_affine(self, slicer): class SpatialImage(DataobjImage): """Template class for volumetric (3D/4D) images""" - header_class: Type[SpatialHeader] = SpatialHeader - ImageSlicer = SpatialFirstSlicer + header_class: type[SpatialHeader] = SpatialHeader + ImageSlicer: type[SpatialFirstSlicer] = SpatialFirstSlicer + + _header: SpatialHeader - def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): + def __init__( + self, + dataobj: ArrayLike, + affine: np.ndarray, + header: FileBasedHeader | ty.Mapping | None = None, + extra: ty.Mapping | None = None, + file_map: FileMap | None = None, + ): """Initialize image The image is a combination of (array-like, affine matrix, header), with @@ -483,7 +528,7 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): def affine(self): return self._affine - def update_header(self): + def update_header(self) -> None: """Harmonize header with image data and affine >>> data = np.zeros((2,3,4)) @@ -512,7 +557,7 @@ def update_header(self): return self._affine2header() - def _affine2header(self): + def _affine2header(self) -> None: """Unconditionally set affine into the header""" RZS = self._affine[:3, :3] vox = np.sqrt(np.sum(RZS * RZS, axis=0)) @@ -522,7 +567,7 @@ def _affine2header(self): zooms[:n_to_set] = vox[:n_to_set] hdr.set_zooms(zooms) - def __str__(self): + def __str__(self) -> str: shape = self.shape affine = self.affine return f""" @@ -534,14 +579,14 @@ def __str__(self): {self._header} """ - def get_data_dtype(self): + def get_data_dtype(self) -> np.dtype: return self._header.get_data_dtype() - def set_data_dtype(self, dtype): + def set_data_dtype(self, dtype: npt.DTypeLike) -> None: self._header.set_data_dtype(dtype) @classmethod - def from_image(klass, img): + def from_image(klass: type[SpatialImgT], img: SpatialImage | FileBasedImage) -> SpatialImgT: """Class method to create new instance of own class from `img` Parameters @@ -555,15 +600,17 @@ def from_image(klass, img): cimg : ``spatialimage`` instance Image, of our own class """ - return klass( - img.dataobj, - img.affine, - klass.header_class.from_header(img.header), - extra=img.extra.copy(), - ) + if isinstance(img, SpatialImage): + return klass( + img.dataobj, + img.affine, + klass.header_class.from_header(img.header), + extra=img.extra.copy(), + ) + return super().from_image(img) @property - def slicer(self): + def slicer(self: SpatialImgT) -> SpatialFirstSlicer[SpatialImgT]: """Slicer object that returns cropped and subsampled images The image is resliced in the current orientation; no rotation or @@ -582,7 +629,7 @@ def slicer(self): """ return self.ImageSlicer(self) - def __getitem__(self, idx): + def __getitem__(self, idx: object) -> None: """No slicing or dictionary interface for images Use the slicer attribute to perform cropping and subsampling at your @@ -595,7 +642,7 @@ def __getitem__(self, idx): '`img.get_fdata()[slice]`' ) - def orthoview(self): + def orthoview(self) -> OrthoSlicer3D: """Plot the image using OrthoSlicer3D Returns @@ -611,7 +658,7 @@ def orthoview(self): """ return OrthoSlicer3D(self.dataobj, self.affine, title=self.get_filename()) - def 
as_reoriented(self, ornt): + def as_reoriented(self: SpatialImgT, ornt: Sequence[Sequence[int]]) -> SpatialImgT: """Apply an orientation change and return a new image If ornt is identity transform, return the original image, unchanged From a12bac7c3589f8b4f94533df3fe1cc88e412e51e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 23 Jan 2023 09:56:45 -0500 Subject: [PATCH 205/702] FIX: Update types based on (unmerged) annotation of fileslice --- nibabel/spatialimages.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index d437cf817a..44a1e11b84 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -395,7 +395,11 @@ def __getitem__(self, slicer: object) -> SpatialImgT: affine = self.slice_affine(slicer) return self.img.__class__(dataobj.copy(), affine, self.img.header) - def check_slicing(self, slicer: object, return_spatial: bool = False) -> tuple[slice, ...]: + def check_slicing( + self, + slicer: object, + return_spatial: bool = False, + ) -> tuple[slice | int | None, ...]: """Canonicalize slicers and check for scalar indices in spatial dims Parameters @@ -426,7 +430,7 @@ def check_slicing(self, slicer: object, return_spatial: bool = False) -> tuple[s ) return spatial_slices if return_spatial else canonical - def slice_affine(self, slicer: tuple[slice, ...]) -> np.ndarray: + def slice_affine(self, slicer: object) -> np.ndarray: """Retrieve affine for current image, if sliced by a given index Applies scaling if down-sampling is applied, and adjusts the intercept From 995dafdd3d7397da2b8dbd76afd8ce29ff77c9be Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 08:26:44 -0500 Subject: [PATCH 206/702] Update nibabel/tests/test_spm99analyze.py Co-authored-by: Zvi Baratz --- nibabel/tests/test_spm99analyze.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index 9f1dc63b4d..a8756e3013 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -47,6 +47,8 @@ COMPLEX_TYPES = sorted(sctypes['c'], key=lambda x: x.__name__) INT_TYPES = sorted(sctypes['i'], key=lambda x: x.__name__) UINT_TYPES = sorted(sctypes['u'], key=lambda x: x.__name__) + +# Create combined type lists CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES IUINT_TYPES = INT_TYPES + UINT_TYPES NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES From a25345178906dd5db60dd3cc96a12f46ac120430 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 29 Jan 2023 15:33:28 +0200 Subject: [PATCH 207/702] TYP: Replace deprecated typing.Sequence generic type Co-authored-by: Chris Markiewicz --- nibabel/spatialimages.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 44a1e11b84..4f3648c4d6 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -133,7 +133,8 @@ import io import typing as ty -from typing import Literal, Sequence +from collections.abc import Sequence +from typing import Literal import numpy as np From aa0bfffe8a171767601adcb36537610df4809dc5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 28 Jan 2023 17:17:33 -0500 Subject: [PATCH 208/702] MNT: Update pre-commit hooks STY: Installation issues with isort TYP: Ensure better (but slower) coverage for pre-commit mypy --- .pre-commit-config.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 
addd5f5634..3a66205335 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,7 +17,7 @@ repos: hooks: - id: blue - repo: https://github.com/pycqa/isort - rev: 5.11.2 + rev: 5.12.0 hooks: - id: isort - repo: https://github.com/pycqa/flake8 @@ -35,5 +35,7 @@ repos: - types-setuptools - types-Pillow - pydicom - # Sync with tool.mypy['exclude'] - exclude: "^(doc|nisext|tools)/|.*/tests/" + - numpy + - pyzstd + args: ["nibabel"] + pass_filenames: false From 47fb8659f09a6367e6d363e2b4cd029d87567da0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 11:01:55 -0500 Subject: [PATCH 209/702] TYP: Annotate tripwire and optpkg modules Refactor _check_pkg_version to make types clearer. Partial application and lambdas seem hard to mypy. --- nibabel/optpkg.py | 35 +++++++++++++++++++++++------------ nibabel/processing.py | 2 +- nibabel/testing/helpers.py | 2 +- nibabel/tripwire.py | 7 ++++--- 4 files changed, 29 insertions(+), 17 deletions(-) diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index d1eb9d17d5..b59a89bb35 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -1,20 +1,31 @@ """Routines to support optional packages""" +from __future__ import annotations + +import typing as ty +from types import ModuleType + from packaging.version import Version from .tripwire import TripWire -def _check_pkg_version(pkg, min_version): - # Default version checking function - if isinstance(min_version, str): - min_version = Version(min_version) - try: - return min_version <= Version(pkg.__version__) - except AttributeError: +def _check_pkg_version(min_version: str | Version) -> ty.Callable[[ModuleType], bool]: + min_ver = Version(min_version) if isinstance(min_version, str) else min_version + + def check(pkg: ModuleType) -> bool: + pkg_ver = getattr(pkg, '__version__', None) + if isinstance(pkg_ver, str): + return min_ver <= Version(pkg_ver) return False + return check + -def optional_package(name, trip_msg=None, min_version=None): +def optional_package( + name: str, + trip_msg: str | None = None, + min_version: str | Version | ty.Callable[[ModuleType], bool] | None = None, +) -> tuple[ModuleType | TripWire, bool, ty.Callable[[], None]]: """Return package-like thing and module setup for package `name` Parameters @@ -81,7 +92,7 @@ def optional_package(name, trip_msg=None, min_version=None): elif min_version is None: check_version = lambda pkg: True else: - check_version = lambda pkg: _check_pkg_version(pkg, min_version) + check_version = _check_pkg_version(min_version) # fromlist=[''] results in submodule being returned, rather than the top # level module. See help(__import__) fromlist = [''] if '.' 
in name else [] @@ -107,11 +118,11 @@ def optional_package(name, trip_msg=None, min_version=None): trip_msg = ( f'We need package {name} for these functions, but ``import {name}`` raised {exc}' ) - pkg = TripWire(trip_msg) + trip = TripWire(trip_msg) - def setup_module(): + def setup_module() -> None: import unittest raise unittest.SkipTest(f'No {name} for these tests') - return pkg, False, setup_module + return trip, False, setup_module diff --git a/nibabel/processing.py b/nibabel/processing.py index d0a01b52b3..c7bd3888de 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -20,7 +20,7 @@ from .optpkg import optional_package -spnd, _, _ = optional_package('scipy.ndimage') +spnd = optional_package('scipy.ndimage')[0] from .affines import AffineError, append_diag, from_matvec, rescale_affine, to_matvec from .imageclasses import spatial_axes_first diff --git a/nibabel/testing/helpers.py b/nibabel/testing/helpers.py index 35b13049f1..2f25a354d7 100644 --- a/nibabel/testing/helpers.py +++ b/nibabel/testing/helpers.py @@ -6,7 +6,7 @@ from ..optpkg import optional_package -_, have_scipy, _ = optional_package('scipy.io') +have_scipy = optional_package('scipy.io')[1] from numpy.testing import assert_array_equal diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index 3b6ecfbb40..055d0cb291 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -1,5 +1,6 @@ """Class to raise error for missing modules or other misfortunes """ +from typing import Any class TripWireError(AttributeError): @@ -11,7 +12,7 @@ class TripWireError(AttributeError): # is not present. -def is_tripwire(obj): +def is_tripwire(obj: Any) -> bool: """Returns True if `obj` appears to be a TripWire object Examples @@ -44,9 +45,9 @@ class TripWire: TripWireError: We do not have a_module """ - def __init__(self, msg): + def __init__(self, msg: str): self._msg = msg - def __getattr__(self, attr_name): + def __getattr__(self, attr_name: str) -> Any: """Raise informative error accessing attributes""" raise TripWireError(self._msg) From 72d7eff962bfb528d1bceb53709f41b5a57cfd6f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 11:03:05 -0500 Subject: [PATCH 210/702] TYP: Annotate deprecation and versioning machinery --- nibabel/deprecated.py | 19 ++++++++------ nibabel/deprecator.py | 58 +++++++++++++++++++++++++++++++------------ nibabel/pkg_info.py | 4 +-- 3 files changed, 55 insertions(+), 26 deletions(-) diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index eb3252fe7e..07965e69a0 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -2,12 +2,15 @@ """ from __future__ import annotations +import typing as ty import warnings -from typing import Type from .deprecator import Deprecator from .pkg_info import cmp_pkg_version +if ty.TYPE_CHECKING: # pragma: no cover + P = ty.ParamSpec('P') + class ModuleProxy: """Proxy for module that may not yet have been imported @@ -30,14 +33,14 @@ class ModuleProxy: module. 
""" - def __init__(self, module_name): + def __init__(self, module_name: str): self._module_name = module_name - def __getattr__(self, key): + def __getattr__(self, key: str) -> ty.Any: mod = __import__(self._module_name, fromlist=['']) return getattr(mod, key) - def __repr__(self): + def __repr__(self) -> str: return f'' @@ -60,7 +63,7 @@ class FutureWarningMixin: warn_message = 'This class will be removed in future versions' - def __init__(self, *args, **kwargs): + def __init__(self, *args: P.args, **kwargs: P.kwargs) -> None: warnings.warn(self.warn_message, FutureWarning, stacklevel=2) super().__init__(*args, **kwargs) @@ -85,12 +88,12 @@ def alert_future_error( msg: str, version: str, *, - warning_class: Type[Warning] = FutureWarning, - error_class: Type[Exception] = RuntimeError, + warning_class: type[Warning] = FutureWarning, + error_class: type[Exception] = RuntimeError, warning_rec: str = '', error_rec: str = '', stacklevel: int = 2, -): +) -> None: """Warn or error with appropriate messages for changing functionality. Parameters diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 251e10d64c..3ef6b45066 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -1,10 +1,16 @@ """Class for recording and reporting deprecations """ +from __future__ import annotations import functools import re +import typing as ty import warnings +if ty.TYPE_CHECKING: # pragma: no cover + T = ty.TypeVar('T') + P = ty.ParamSpec('P') + _LEADING_WHITE = re.compile(r'^(\s*)') TESTSETUP = """ @@ -38,7 +44,7 @@ class ExpiredDeprecationError(RuntimeError): pass -def _ensure_cr(text): +def _ensure_cr(text: str) -> str: """Remove trailing whitespace and add carriage return Ensures that `text` always ends with a carriage return @@ -46,7 +52,12 @@ def _ensure_cr(text): return text.rstrip() + '\n' -def _add_dep_doc(old_doc, dep_doc, setup='', cleanup=''): +def _add_dep_doc( + old_doc: str, + dep_doc: str, + setup: str = '', + cleanup: str = '', +) -> str: """Add deprecation message `dep_doc` to docstring in `old_doc` Parameters @@ -55,6 +66,10 @@ def _add_dep_doc(old_doc, dep_doc, setup='', cleanup=''): Docstring from some object. dep_doc : str Deprecation warning to add to top of docstring, after initial line. 
+ setup : str, optional + Doctest setup text + cleanup : str, optional + Doctest teardown text Returns ------- @@ -76,7 +91,9 @@ def _add_dep_doc(old_doc, dep_doc, setup='', cleanup=''): if next_line >= len(old_lines): # nothing following first paragraph, just append message return old_doc + '\n' + dep_doc - indent = _LEADING_WHITE.match(old_lines[next_line]).group() + leading_white = _LEADING_WHITE.match(old_lines[next_line]) + assert leading_white is not None # Type narrowing, since this always matches + indent = leading_white.group() setup_lines = [indent + L for L in setup.splitlines()] dep_lines = [indent + L for L in [''] + dep_doc.splitlines() + ['']] cleanup_lines = [indent + L for L in cleanup.splitlines()] @@ -113,15 +130,15 @@ class Deprecator: def __init__( self, - version_comparator, - warn_class=DeprecationWarning, - error_class=ExpiredDeprecationError, - ): + version_comparator: ty.Callable[[str], int], + warn_class: type[Warning] = DeprecationWarning, + error_class: type[Exception] = ExpiredDeprecationError, + ) -> None: self.version_comparator = version_comparator self.warn_class = warn_class self.error_class = error_class - def is_bad_version(self, version_str): + def is_bad_version(self, version_str: str) -> bool: """Return True if `version_str` is too high Tests `version_str` with ``self.version_comparator`` @@ -139,7 +156,14 @@ def is_bad_version(self, version_str): """ return self.version_comparator(version_str) == -1 - def __call__(self, message, since='', until='', warn_class=None, error_class=None): + def __call__( + self, + message: str, + since: str = '', + until: str = '', + warn_class: type[Warning] | None = None, + error_class: type[Exception] | None = None, + ) -> ty.Callable[[ty.Callable[P, T]], ty.Callable[P, T]]: """Return decorator function function for deprecation warning / error Parameters @@ -164,8 +188,8 @@ def __call__(self, message, since='', until='', warn_class=None, error_class=Non deprecator : func Function returning a decorator. """ - warn_class = warn_class or self.warn_class - error_class = error_class or self.error_class + exception = error_class if error_class is not None else self.error_class + warning = warn_class if warn_class is not None else self.warn_class messages = [message] if (since, until) != ('', ''): messages.append('') @@ -174,19 +198,21 @@ def __call__(self, message, since='', until='', warn_class=None, error_class=Non if until: messages.append( f"* {'Raises' if self.is_bad_version(until) else 'Will raise'} " - f'{error_class} as of version: {until}' + f'{exception} as of version: {until}' ) message = '\n'.join(messages) - def deprecator(func): + def deprecator(func: ty.Callable[P, T]) -> ty.Callable[P, T]: @functools.wraps(func) - def deprecated_func(*args, **kwargs): + def deprecated_func(*args: P.args, **kwargs: P.kwargs) -> T: if until and self.is_bad_version(until): - raise error_class(message) - warnings.warn(message, warn_class, stacklevel=2) + raise exception(message) + warnings.warn(message, warning, stacklevel=2) return func(*args, **kwargs) keep_doc = deprecated_func.__doc__ + if keep_doc is None: + keep_doc = '' setup = TESTSETUP cleanup = TESTCLEANUP # After expiration, remove all but the first paragraph. 
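The ``ParamSpec``/``TypeVar`` pair introduced above is what lets ``Deprecator.__call__`` return a signature-preserving decorator: the annotation ``Callable[[Callable[P, T]], Callable[P, T]]`` tells a type checker that whatever argument list and return type go in come back out unchanged. A minimal, self-contained sketch of that pattern (hypothetical names, not taken from the patch; it assumes Python 3.10+ so ``typing.ParamSpec`` is available at runtime, whereas the diff above imports it under ``ty.TYPE_CHECKING`` to keep supporting 3.8):

    import functools
    import typing as ty
    import warnings

    P = ty.ParamSpec('P')  # runtime ParamSpec requires Python 3.10+
    T = ty.TypeVar('T')

    def deprecate(message: str) -> ty.Callable[[ty.Callable[P, T]], ty.Callable[P, T]]:
        """Build a decorator that warns on each call but keeps the wrapped signature."""

        def decorator(func: ty.Callable[P, T]) -> ty.Callable[P, T]:
            @functools.wraps(func)
            def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
                warnings.warn(message, DeprecationWarning, stacklevel=2)
                return func(*args, **kwargs)

            return wrapper

        return decorator

    @deprecate('scale() is deprecated; use rescale() instead')
    def scale(value: float, factor: float = 2.0) -> float:
        return value * factor

    print(scale(3.0))  # warns with DeprecationWarning, then returns 6.0

Because ``wrapper`` is typed with ``*args: P.args, **kwargs: P.kwargs``, a checker still flags a call like ``scale('oops')`` as an error after decoration — the same motivation for threading ``P`` and ``T`` through ``deprecated_func`` in the diff above.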
diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 73dfd92ed2..061cc3e6d1 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -14,7 +14,7 @@ COMMIT_HASH = '$Format:%h$' -def _cmp(a, b) -> int: +def _cmp(a: Version, b: Version) -> int: """Implementation of ``cmp`` for Python 3""" return (a > b) - (a < b) @@ -113,7 +113,7 @@ def pkg_commit_hash(pkg_path: str | None = None) -> tuple[str, str]: return '(none found)', '' -def get_pkg_info(pkg_path: str) -> dict: +def get_pkg_info(pkg_path: str) -> dict[str, str]: """Return dict describing the context of this package Parameters From 62a95f6b37199acd847d2db0272fda3b229f3d90 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 11:43:02 -0500 Subject: [PATCH 211/702] TYP: Annotate onetime module --- nibabel/onetime.py | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/nibabel/onetime.py b/nibabel/onetime.py index 8156b1a403..d84b7e86ca 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -19,6 +19,12 @@ [2] Python data model, https://docs.python.org/reference/datamodel.html """ +from __future__ import annotations + +import typing as ty + +InstanceT = ty.TypeVar('InstanceT') +T = ty.TypeVar('T') from nibabel.deprecated import deprecate_with_version @@ -96,26 +102,24 @@ class ResetMixin: 10.0 """ - def reset(self): + def reset(self) -> None: """Reset all OneTimeProperty attributes that may have fired already.""" - instdict = self.__dict__ - classdict = self.__class__.__dict__ # To reset them, we simply remove them from the instance dict. At that # point, it's as if they had never been computed. On the next access, # the accessor function from the parent class will be called, simply # because that's how the python descriptor protocol works. - for mname, mval in classdict.items(): - if mname in instdict and isinstance(mval, OneTimeProperty): + for mname, mval in self.__class__.__dict__.items(): + if mname in self.__dict__ and isinstance(mval, OneTimeProperty): delattr(self, mname) -class OneTimeProperty: +class OneTimeProperty(ty.Generic[T]): """A descriptor to make special properties that become normal attributes. This is meant to be used mostly by the auto_attr decorator in this module. """ - def __init__(self, func): + def __init__(self, func: ty.Callable[[InstanceT], T]): """Create a OneTimeProperty instance. Parameters @@ -128,24 +132,35 @@ def __init__(self, func): """ self.getter = func self.name = func.__name__ + self.__doc__ = func.__doc__ + + @ty.overload + def __get__( + self, obj: None, objtype: type[InstanceT] | None = None + ) -> ty.Callable[[InstanceT], T]: + ... # pragma: no cover + + @ty.overload + def __get__(self, obj: InstanceT, objtype: type[InstanceT] | None = None) -> T: + ... # pragma: no cover - def __get__(self, obj, type=None): + def __get__( + self, obj: InstanceT | None, objtype: type[InstanceT] | None = None + ) -> T | ty.Callable[[InstanceT], T]: """This will be called on attribute access on the class or instance.""" if obj is None: # Being called on the class, return the original function. This # way, introspection works on the class. 
- # return func return self.getter - # Errors in the following line are errors in setting a - # OneTimeProperty + # Errors in the following line are errors in setting a OneTimeProperty val = self.getter(obj) - setattr(obj, self.name, val) + obj.__dict__[self.name] = val return val -def auto_attr(func): +def auto_attr(func: ty.Callable[[InstanceT], T]) -> OneTimeProperty[T]: """Decorator to create OneTimeProperty attributes. Parameters From 4a676c5c73b2ce1bfea01ade879595fea46e31f9 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 30 Jan 2023 06:25:45 -0500 Subject: [PATCH 212/702] TYP: Add None return type to __init__ methods Co-authored-by: Zvi Baratz --- nibabel/deprecated.py | 2 +- nibabel/onetime.py | 2 +- nibabel/tripwire.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 07965e69a0..c353071954 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -33,7 +33,7 @@ class ModuleProxy: module. """ - def __init__(self, module_name: str): + def __init__(self, module_name: str) -> None: self._module_name = module_name def __getattr__(self, key: str) -> ty.Any: diff --git a/nibabel/onetime.py b/nibabel/onetime.py index d84b7e86ca..7c723d4c83 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -119,7 +119,7 @@ class OneTimeProperty(ty.Generic[T]): This is meant to be used mostly by the auto_attr decorator in this module. """ - def __init__(self, func: ty.Callable[[InstanceT], T]): + def __init__(self, func: ty.Callable[[InstanceT], T]) -> None: """Create a OneTimeProperty instance. Parameters diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index 055d0cb291..d0c3d4c50c 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -45,7 +45,7 @@ class TripWire: TripWireError: We do not have a_module """ - def __init__(self, msg: str): + def __init__(self, msg: str) -> None: self._msg = msg def __getattr__(self, attr_name: str) -> Any: From 015608c1712944234a88c0956d3c2f2386dfbcf4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 20:11:29 -0500 Subject: [PATCH 213/702] TEST: Remove final distutils import --- nibabel/tests/test_casting.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index 62da526319..a082394b7b 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -233,10 +233,15 @@ def test_best_float(): def test_longdouble_precision_improved(): - # Just check that this can only be True on windows, msvc - from numpy.distutils.ccompiler import get_default_compiler + # Just check that this can only be True on Windows - if not (os.name == 'nt' and get_default_compiler() == 'msvc'): + # This previously used distutils.ccompiler.get_default_compiler to check for msvc + # In https://github.com/python/cpython/blob/3467991/Lib/distutils/ccompiler.py#L919-L956 + # we see that this was implied by os.name == 'nt', so we can remove this deprecated + # call. + # However, there may be detectable conditions in Windows where we would expect this + # to be False as well. 
+ if os.name != 'nt': assert not longdouble_precision_improved() From 4ac1c0a9737f4038a3fa403846271cded8d139b1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 20:27:48 -0500 Subject: [PATCH 214/702] MNT: Add importlib_resources to typing environment --- .pre-commit-config.yaml | 1 + pyproject.toml | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3a66205335..1fc7efd0b9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,5 +37,6 @@ repos: - pydicom - numpy - pyzstd + - importlib_resources args: ["nibabel"] pass_filenames: false diff --git a/pyproject.toml b/pyproject.toml index 6d44c607ed..aebdccc7a7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,7 +68,14 @@ test = [ "pytest-httpserver", "pytest-xdist", ] -typing = ["mypy", "pytest", "types-setuptools", "types-Pillow", "pydicom"] +typing = [ + "mypy", + "pytest", + "types-setuptools", + "types-Pillow", + "pydicom", + "importlib_resources", +] zstd = ["pyzstd >= 0.14.3"] [tool.hatch.build.targets.sdist] From 9c8cd1f016b779aaa08f565efd8885c27e5feb72 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 20:28:02 -0500 Subject: [PATCH 215/702] RF: Use importlib_resources over pkg_resources --- nibabel/__init__.py | 13 +++++++++---- nibabel/testing/__init__.py | 24 ++++++++++++++++++------ nibabel/tests/test_init.py | 16 ++++++++++------ nibabel/tests/test_testing.py | 8 +++++--- 4 files changed, 42 insertions(+), 19 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 4311e3d7bf..50dca14515 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -171,11 +171,16 @@ def bench(label=None, verbose=1, extra_argv=None): code : ExitCode Returns the result of running the tests as a ``pytest.ExitCode`` enum """ - from pkg_resources import resource_filename + try: + from importlib.resources import as_file, files + except ImportError: + from importlib_resources import as_file, files - config = resource_filename('nibabel', 'benchmarks/pytest.benchmark.ini') args = [] if extra_argv is not None: args.extend(extra_argv) - args.extend(['-c', config]) - return test(label, verbose, extra_argv=args) + + config_path = files('nibabel') / 'benchmarks/pytest.benchmark.ini' + with as_file(config_path) as config: + args.extend(['-c', str(config)]) + return test(label, verbose, extra_argv=args) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index bcd62e470c..fb9141c17c 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -7,10 +7,12 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for testing""" +from __future__ import annotations import os import re import sys +import typing as ty import unittest import warnings from contextlib import nullcontext @@ -19,24 +21,34 @@ import numpy as np import pytest from numpy.testing import assert_array_equal -from pkg_resources import resource_filename from .helpers import assert_data_similar, bytesio_filemap, bytesio_round_trip from .np_features import memmap_after_ufunc +try: + from importlib.abc import Traversable + from importlib.resources import as_file, files +except ImportError: # PY38 + from importlib_resources import as_file, files + from importlib_resources.abc import Traversable -def test_data(subdir=None, fname=None): + +def test_data( + subdir: ty.Literal['gifti', 'nicom', 'externals'] | None = None, + fname: str | None = None, +) -> Traversable: + 
parts: tuple[str, ...] if subdir is None: - resource = os.path.join('tests', 'data') + parts = ('tests', 'data') elif subdir in ('gifti', 'nicom', 'externals'): - resource = os.path.join(subdir, 'tests', 'data') + parts = (subdir, 'tests', 'data') else: raise ValueError(f'Unknown test data directory: {subdir}') if fname is not None: - resource = os.path.join(resource, fname) + parts += (fname,) - return resource_filename('nibabel', resource) + return files('nibabel').joinpath(*parts) # set path to example data diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index ff4dc082f6..877c045f6e 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -1,7 +1,12 @@ +import pathlib from unittest import mock import pytest -from pkg_resources import resource_filename + +try: + from importlib.resources import as_file, files +except ImportError: + from importlib_resources import as_file, files import nibabel as nib @@ -38,12 +43,11 @@ def test_nibabel_test_errors(): def test_nibabel_bench(): - expected_args = ['-c', '--pyargs', 'nibabel'] + config_path = files('nibabel') / 'benchmarks/pytest.benchmark.ini' + if not isinstance(config_path, pathlib.Path): + raise unittest.SkipTest('Package is not unpacked; could get temp path') - try: - expected_args.insert(1, resource_filename('nibabel', 'benchmarks/pytest.benchmark.ini')) - except: - raise unittest.SkipTest('Not installed') + expected_args = ['-c', str(config_path), '--pyargs', 'nibabel'] with mock.patch('pytest.main') as pytest_main: nib.bench(verbose=0) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 38c815d4c8..8504627e1c 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -171,12 +171,14 @@ def test_assert_re_in(regex, entries): def test_test_data(): - assert test_data() == data_path - assert test_data() == os.path.abspath( + assert str(test_data()) == str(data_path) + assert str(test_data()) == os.path.abspath( os.path.join(os.path.dirname(__file__), '..', 'tests', 'data') ) for subdir in ('nicom', 'gifti', 'externals'): - assert test_data(subdir) == os.path.join(data_path[:-10], subdir, 'tests', 'data') + assert str(test_data(subdir)) == os.path.join( + data_path.parent.parent, subdir, 'tests', 'data' + ) assert os.path.exists(test_data(subdir)) assert not os.path.exists(test_data(subdir, 'doesnotexist')) From 8891d8718b4dc032e215a7b70982263e0b08c12b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 20:28:28 -0500 Subject: [PATCH 216/702] FIX: Swapped source and commit hash --- nibabel/pkg_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 061cc3e6d1..7e816939d5 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -101,7 +101,7 @@ def pkg_commit_hash(pkg_path: str | None = None) -> tuple[str, str]: return 'archive substitution', COMMIT_HASH ver = Version(__version__) if ver.local is not None and ver.local.startswith('g'): - return ver.local[1:8], 'installation' + return 'installation', ver.local[1:8] # maybe we are in a repository proc = run( ('git', 'rev-parse', '--short', 'HEAD'), From 7a35fc92b21e13c039779c67c9c3c2d40ee583a4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 20:56:34 -0500 Subject: [PATCH 217/702] MNT: Drop setuptools dependency, require importlib_resources for PY38 --- pyproject.toml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 
aebdccc7a7..e002f6d053 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,11 @@ maintainers = [{ name = "Christopher Markiewicz" }] readme = "README.rst" license = { text = "MIT License" } requires-python = ">=3.8" -dependencies = ["numpy >=1.19", "packaging >=17", "setuptools"] +dependencies = [ + "numpy >=1.19", + "packaging >=17", + "importlib_resources; python_version < '3.9'", +] classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", From fa5f9207fbc1bee9e39bac865c80afb6987e13e1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 30 Jan 2023 16:24:50 -0500 Subject: [PATCH 218/702] TEST: Simplify and comment test_data tests --- nibabel/tests/test_testing.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 8504627e1c..a2a9496d70 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -171,14 +171,14 @@ def test_assert_re_in(regex, entries): def test_test_data(): - assert str(test_data()) == str(data_path) + assert str(test_data()) == str(data_path) # Always get the same result + # Works the same as using __file__ and os.path utilities assert str(test_data()) == os.path.abspath( os.path.join(os.path.dirname(__file__), '..', 'tests', 'data') ) + # Check action of subdir and that existence checks work for subdir in ('nicom', 'gifti', 'externals'): - assert str(test_data(subdir)) == os.path.join( - data_path.parent.parent, subdir, 'tests', 'data' - ) + assert test_data(subdir) == data_path.parent.parent / subdir / 'tests' / 'data' assert os.path.exists(test_data(subdir)) assert not os.path.exists(test_data(subdir, 'doesnotexist')) From ad439f5e9f9c3c65d16969683b08cb15b37d7ee4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 30 Jan 2023 16:25:32 -0500 Subject: [PATCH 219/702] RF: Rename testing.test_data to testing.get_test_data --- nibabel/cmdline/tests/test_conform.py | 6 +++--- nibabel/cmdline/tests/test_convert.py | 14 +++++++------- nibabel/gifti/gifti.py | 6 +++--- nibabel/gifti/tests/test_gifti.py | 10 +++++----- nibabel/testing/__init__.py | 4 ++-- nibabel/tests/test_testing.py | 18 +++++++++--------- 6 files changed, 29 insertions(+), 29 deletions(-) diff --git a/nibabel/cmdline/tests/test_conform.py b/nibabel/cmdline/tests/test_conform.py index 524e81fc79..dbbf96186f 100644 --- a/nibabel/cmdline/tests/test_conform.py +++ b/nibabel/cmdline/tests/test_conform.py @@ -15,7 +15,7 @@ import nibabel as nib from nibabel.cmdline.conform import main from nibabel.optpkg import optional_package -from nibabel.testing import test_data +from nibabel.testing import get_test_data _, have_scipy, _ = optional_package('scipy.ndimage') needs_scipy = unittest.skipUnless(have_scipy, 'These tests need scipy') @@ -23,7 +23,7 @@ @needs_scipy def test_default(tmpdir): - infile = test_data(fname='anatomical.nii') + infile = get_test_data(fname='anatomical.nii') outfile = tmpdir / 'output.nii.gz' main([str(infile), str(outfile)]) assert outfile.isfile() @@ -41,7 +41,7 @@ def test_default(tmpdir): @needs_scipy def test_nondefault(tmpdir): - infile = test_data(fname='anatomical.nii') + infile = get_test_data(fname='anatomical.nii') outfile = tmpdir / 'output.nii.gz' out_shape = (100, 100, 150) voxel_size = (1, 2, 4) diff --git a/nibabel/cmdline/tests/test_convert.py b/nibabel/cmdline/tests/test_convert.py index 411726a9ea..4605bc810d 100644 --- a/nibabel/cmdline/tests/test_convert.py +++ b/nibabel/cmdline/tests/test_convert.py @@ 
-13,11 +13,11 @@ import nibabel as nib from nibabel.cmdline import convert -from nibabel.testing import test_data +from nibabel.testing import get_test_data def test_convert_noop(tmp_path): - infile = test_data(fname='anatomical.nii') + infile = get_test_data(fname='anatomical.nii') outfile = tmp_path / 'output.nii.gz' orig = nib.load(infile) @@ -31,7 +31,7 @@ def test_convert_noop(tmp_path): assert converted.shape == orig.shape assert converted.get_data_dtype() == orig.get_data_dtype() - infile = test_data(fname='resampled_anat_moved.nii') + infile = get_test_data(fname='resampled_anat_moved.nii') with pytest.raises(FileExistsError): convert.main([str(infile), str(outfile)]) @@ -50,7 +50,7 @@ def test_convert_noop(tmp_path): @pytest.mark.parametrize('data_dtype', ('u1', 'i2', 'float32', 'float', 'int64')) def test_convert_dtype(tmp_path, data_dtype): - infile = test_data(fname='anatomical.nii') + infile = get_test_data(fname='anatomical.nii') outfile = tmp_path / 'output.nii.gz' orig = nib.load(infile) @@ -78,7 +78,7 @@ def test_convert_dtype(tmp_path, data_dtype): ], ) def test_convert_by_extension(tmp_path, ext, img_class): - infile = test_data(fname='anatomical.nii') + infile = get_test_data(fname='anatomical.nii') outfile = tmp_path / f'output.{ext}' orig = nib.load(infile) @@ -102,7 +102,7 @@ def test_convert_by_extension(tmp_path, ext, img_class): ], ) def test_convert_imgtype(tmp_path, ext, img_class): - infile = test_data(fname='anatomical.nii') + infile = get_test_data(fname='anatomical.nii') outfile = tmp_path / f'output.{ext}' orig = nib.load(infile) @@ -118,7 +118,7 @@ def test_convert_imgtype(tmp_path, ext, img_class): def test_convert_nifti_int_fail(tmp_path): - infile = test_data(fname='anatomical.nii') + infile = get_test_data(fname='anatomical.nii') outfile = tmp_path / f'output.nii' orig = nib.load(infile) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 919e4faef2..326e60fa2e 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -701,8 +701,8 @@ def agg_data(self, intent_code=None): Consider a surface GIFTI file: >>> import nibabel as nib - >>> from nibabel.testing import test_data - >>> surf_img = nib.load(test_data('gifti', 'ascii.gii')) + >>> from nibabel.testing import get_test_data + >>> surf_img = nib.load(get_test_data('gifti', 'ascii.gii')) The coordinate data, which is indicated by the ``NIFTI_INTENT_POINTSET`` intent code, may be retrieved using any of the following equivalent @@ -754,7 +754,7 @@ def agg_data(self, intent_code=None): The following image is a GIFTI file with ten (10) data arrays of the same size, and with intent code 2001 (``NIFTI_INTENT_TIME_SERIES``): - >>> func_img = nib.load(test_data('gifti', 'task.func.gii')) + >>> func_img = nib.load(get_test_data('gifti', 'task.func.gii')) When aggregating time series data, these arrays are concatenated into a single, vertex-by-timestep array: diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 49a8cbc07f..cd87bcfeea 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -14,7 +14,7 @@ from ... import load from ...fileholders import FileHolder from ...nifti1 import data_type_codes -from ...testing import test_data +from ...testing import get_test_data from .. 
import ( GiftiCoordSystem, GiftiDataArray, @@ -35,9 +35,9 @@ def test_agg_data(): - surf_gii_img = load(test_data('gifti', 'ascii.gii')) - func_gii_img = load(test_data('gifti', 'task.func.gii')) - shape_gii_img = load(test_data('gifti', 'rh.shape.curv.gii')) + surf_gii_img = load(get_test_data('gifti', 'ascii.gii')) + func_gii_img = load(get_test_data('gifti', 'task.func.gii')) + shape_gii_img = load(get_test_data('gifti', 'rh.shape.curv.gii')) # add timeseries data with intent code ``none`` point_data = surf_gii_img.get_arrays_from_intent('pointset')[0].data @@ -478,7 +478,7 @@ def test_darray_dtype_coercion_failures(): def test_gifti_file_close(recwarn): - gii = load(test_data('gifti', 'ascii.gii')) + gii = load(get_test_data('gifti', 'ascii.gii')) with InTemporaryDirectory(): gii.to_filename('test.gii') assert not any(isinstance(r.message, ResourceWarning) for r in recwarn) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index fb9141c17c..5baa5e2b86 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -33,7 +33,7 @@ from importlib_resources.abc import Traversable -def test_data( +def get_test_data( subdir: ty.Literal['gifti', 'nicom', 'externals'] | None = None, fname: str | None = None, ) -> Traversable: @@ -52,7 +52,7 @@ def test_data( # set path to example data -data_path = test_data() +data_path = get_test_data() def assert_dt_equal(a, b): diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index a2a9496d70..8cd70e37a9 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -15,8 +15,8 @@ data_path, error_warnings, get_fresh_mod, + get_test_data, suppress_warnings, - test_data, ) @@ -171,22 +171,22 @@ def test_assert_re_in(regex, entries): def test_test_data(): - assert str(test_data()) == str(data_path) # Always get the same result + assert str(get_test_data()) == str(data_path) # Always get the same result # Works the same as using __file__ and os.path utilities - assert str(test_data()) == os.path.abspath( + assert str(get_test_data()) == os.path.abspath( os.path.join(os.path.dirname(__file__), '..', 'tests', 'data') ) # Check action of subdir and that existence checks work for subdir in ('nicom', 'gifti', 'externals'): - assert test_data(subdir) == data_path.parent.parent / subdir / 'tests' / 'data' - assert os.path.exists(test_data(subdir)) - assert not os.path.exists(test_data(subdir, 'doesnotexist')) + assert get_test_data(subdir) == data_path.parent.parent / subdir / 'tests' / 'data' + assert os.path.exists(get_test_data(subdir)) + assert not os.path.exists(get_test_data(subdir, 'doesnotexist')) for subdir in ('freesurfer', 'doesnotexist'): with pytest.raises(ValueError): - test_data(subdir) + get_test_data(subdir) - assert not os.path.exists(test_data(None, 'doesnotexist')) + assert not os.path.exists(get_test_data(None, 'doesnotexist')) for subdir, fname in [ ('gifti', 'ascii.gii'), @@ -194,4 +194,4 @@ def test_test_data(): ('externals', 'example_1.nc'), (None, 'empty.tck'), ]: - assert os.path.exists(test_data(subdir, fname)) + assert os.path.exists(get_test_data(subdir, fname)) From aaeca86e913b295fa1e1f6b9580bcef102ab71c4 Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Wed, 1 Feb 2023 22:25:11 -0500 Subject: [PATCH 220/702] Added distribution badges --- README.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.rst b/README.rst index 1afdbc511a..6dfcc3d584 100644 --- a/README.rst +++ b/README.rst @@ -7,6 +7,14 @@ .. 
image:: https://zenodo.org/badge/DOI/10.5281/zenodo.591597.svg :target: https://doi.org/10.5281/zenodo.591597 +.. image :: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable + :target: https://repology.org/project/nibabel/versions + :alt: Debian Unstable package + +.. image:: https://repology.org/badge/version-for-repo/gentoo_ovl_science/nibabel.svg?header=Gentoo%20%28%3A%3Ascience%29 + :target: https://repology.org/project/nibabel/versions + :alt: Gentoo (::science) + .. Following contents should be from LONG_DESCRIPTION in nibabel/info.py From 60e1ca2c6b8bbe87bbc26258e8c40cc62c4bf07d Mon Sep 17 00:00:00 2001 From: Michiel Cottaar Date: Thu, 2 Feb 2023 16:59:01 +0000 Subject: [PATCH 221/702] BF: Support ragged voxel arrays in ParcelsAxis In the past we used `np.asanyarray(voxels)`, which would produce an array with dtype="object" if provided with a ragged array. This no longer works in numpy 1.24. --- nibabel/cifti2/cifti2_axes.py | 11 +++-------- nibabel/cifti2/tests/test_axes.py | 23 ++++++++++++++++++++++- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 3142c8362b..63275c9c42 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -775,14 +775,9 @@ def __init__(self, name, voxels, vertices, affine=None, volume_shape=None, nvert maps names of surface elements to integers (not needed for volumetric CIFTI-2 files) """ self.name = np.asanyarray(name, dtype='U') - as_array = np.asanyarray(voxels) - if as_array.ndim == 1: - voxels = as_array.astype('object') - else: - voxels = np.empty(len(voxels), dtype='object') - for idx in range(len(voxels)): - voxels[idx] = as_array[idx] - self.voxels = np.asanyarray(voxels, dtype='object') + self.voxels = np.empty(len(voxels), dtype='object') + for idx in range(len(voxels)): + self.voxels[idx] = voxels[idx] self.vertices = np.asanyarray(vertices, dtype='object') self.affine = np.asanyarray(affine) if affine is not None else None self.volume_shape = volume_shape diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index 4cabd188b1..245964502f 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -494,13 +494,34 @@ def test_parcels(): assert prc != prc_other # test direct initialisation - axes.ParcelsAxis( + test_parcel = axes.ParcelsAxis( voxels=[np.ones((3, 2), dtype=int)], vertices=[{}], name=['single_voxel'], affine=np.eye(4), volume_shape=(2, 3, 4), ) + assert len(test_parcel) == 1 + + # test direct initialisation with multiple parcels + test_parcel = axes.ParcelsAxis( + voxels=[np.ones((3, 2), dtype=int), np.zeros((3, 2), dtype=int)], + vertices=[{}, {}], + name=['first_parcel', 'second_parcel'], + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) + assert len(test_parcel) == 2 + + # test direct initialisation with ragged voxel/vertices array + test_parcel = axes.ParcelsAxis( + voxels=[np.ones((3, 2), dtype=int), np.zeros((5, 2), dtype=int)], + vertices=[{}, {}], + name=['first_parcel', 'second_parcel'], + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) + assert len(test_parcel) == 2 with pytest.raises(ValueError): axes.ParcelsAxis( From c3967d3b246a977d86ef15110650eae5f6e7760b Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Thu, 2 Feb 2023 14:04:53 -0500 Subject: [PATCH 222/702] Typo --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 6dfcc3d584..032c4e6d72 100644 --- 
a/README.rst +++ b/README.rst @@ -7,7 +7,7 @@ .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.591597.svg :target: https://doi.org/10.5281/zenodo.591597 -.. image :: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable +.. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable :target: https://repology.org/project/nibabel/versions :alt: Debian Unstable package From 9ec8b7cccce9a7f1797224ab5292fb2ffe5bfaa4 Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Thu, 2 Feb 2023 14:07:14 -0500 Subject: [PATCH 223/702] Added AUR --- README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.rst b/README.rst index 032c4e6d72..4cc9081be3 100644 --- a/README.rst +++ b/README.rst @@ -7,6 +7,10 @@ .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.591597.svg :target: https://doi.org/10.5281/zenodo.591597 +.. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29 + :target: https://repology.org/project/python:nibabel/versions + :alt: Arch (AUR) + .. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable :target: https://repology.org/project/nibabel/versions :alt: Debian Unstable package From 6553bcaf923626e3d67b99798acb1a728f19dfb9 Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Thu, 2 Feb 2023 14:10:37 -0500 Subject: [PATCH 224/702] Added nix badge --- README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.rst b/README.rst index 4cc9081be3..3378e751c2 100644 --- a/README.rst +++ b/README.rst @@ -19,6 +19,10 @@ :target: https://repology.org/project/nibabel/versions :alt: Gentoo (::science) +.. image:: https://repology.org/badge/version-for-repo/nix_unstable/python:nibabel.svg?header=nixpkgs%20unstable + :target: https://repology.org/project/python:nibabel/versions + :alt: nixpkgs unstable + .. 
Following contents should be from LONG_DESCRIPTION in nibabel/info.py From 870f106b9d13d7a6d00f71df0e997b5d4e048c66 Mon Sep 17 00:00:00 2001 From: Michiel Cottaar Date: Thu, 2 Feb 2023 20:32:44 +0000 Subject: [PATCH 225/702] Use enumerate to iterate over voxels Co-authored-by: Chris Markiewicz --- nibabel/cifti2/cifti2_axes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 63275c9c42..0c75190f80 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -776,8 +776,8 @@ def __init__(self, name, voxels, vertices, affine=None, volume_shape=None, nvert """ self.name = np.asanyarray(name, dtype='U') self.voxels = np.empty(len(voxels), dtype='object') - for idx in range(len(voxels)): - self.voxels[idx] = voxels[idx] + for idx, vox in enumerate(voxels): + self.voxels[idx] = vox self.vertices = np.asanyarray(vertices, dtype='object') self.affine = np.asanyarray(affine) if affine is not None else None self.volume_shape = volume_shape From 41ce88c09c83bd3f01ed6c4b32ca8d4860946e93 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 12 Jan 2023 21:37:10 -0500 Subject: [PATCH 226/702] TEST: Check that quaternions.fillpositive does not augment unit vectors --- nibabel/tests/test_quaternions.py | 59 +++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index a3e63dd851..a02c02564b 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -16,6 +16,18 @@ from .. import eulerangles as nea from .. import quaternions as nq + +def norm(vec): + # Return unit vector with same orientation as input vector + return vec / np.sqrt(vec @ vec) + + +def gen_vec(dtype): + # Generate random 3-vector in [-1, 1]^3 + rand = np.random.default_rng() + return rand.uniform(low=-1.0, high=1.0, size=(3,)).astype(dtype) + + # Example rotations eg_rots = [] params = (-pi, pi, pi / 2) @@ -69,6 +81,53 @@ def test_fillpos(): assert wxyz[0] == 0.0 +@pytest.mark.parametrize('dtype', ('f4', 'f8')) +def test_fillpositive_plus_minus_epsilon(dtype): + # Deterministic test for fillpositive threshold + # We are trying to fill (x, y, z) with a w such that |(w, x, y, z)| == 1 + # If |(x, y, z)| is slightly off one, w should still be 0 + nptype = np.dtype(dtype).type + + # Obviously, |(x, y, z)| == 1 + baseline = np.array([0, 0, 1], dtype=dtype) + + # Obviously, |(x, y, z)| ~ 1 + plus = baseline * nptype(1 + np.finfo(dtype).eps) + minus = baseline * nptype(1 - np.finfo(dtype).eps) + + assert nq.fillpositive(plus)[0] == 0.0 + assert nq.fillpositive(minus)[0] == 0.0 + + +@pytest.mark.parametrize('dtype', ('f4', 'f8')) +def test_fillpositive_simulated_error(dtype): + # Nondeterministic test for fillpositive threshold + # Create random vectors, normalize to unit length, and count on floating point + # error to result in magnitudes larger/smaller than one + # This is to simulate cases where a unit quaternion with w == 0 would be encoded + # as xyz with small error, and we want to recover the w of 0 + + # Permit 1 epsilon per value (default, but make explicit here) + w2_thresh = 3 * -np.finfo(dtype).eps + + pos_error = neg_error = False + for _ in range(50): + xyz = norm(gen_vec(dtype)) + + wxyz = nq.fillpositive(xyz, w2_thresh) + assert wxyz[0] == 0.0 + + # Verify that we exercise the threshold + magnitude = xyz @ xyz + if magnitude < 1: + pos_error = True + elif magnitude > 1: + neg_error = True + + assert 
pos_error, 'Did not encounter a case where 1 - |xyz| > 0' + assert neg_error, 'Did not encounter a case where 1 - |xyz| < 0' + + def test_conjugate(): # Takes sequence cq = nq.conjugate((1, 0, 0, 0)) From 943c13d838da9da277d2599345c645d191c44b84 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 13 Jan 2023 06:50:02 -0500 Subject: [PATCH 227/702] ENH: Set symmetric threshold for identifying unit quaternions in qform calculation --- nibabel/quaternions.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index c14e5a2731..f549605f50 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -42,7 +42,7 @@ def fillpositive(xyz, w2_thresh=None): xyz : iterable iterable containing 3 values, corresponding to quaternion x, y, z w2_thresh : None or float, optional - threshold to determine if w squared is really negative. + threshold to determine if w squared is non-zero. If None (default) then w2_thresh set equal to ``-np.finfo(xyz.dtype).eps``, if possible, otherwise ``-np.finfo(np.float64).eps`` @@ -95,11 +95,11 @@ def fillpositive(xyz, w2_thresh=None): # Use maximum precision xyz = np.asarray(xyz, dtype=MAX_FLOAT) # Calculate w - w2 = 1.0 - np.dot(xyz, xyz) - if w2 < 0: - if w2 < w2_thresh: - raise ValueError(f'w2 should be positive, but is {w2:e}') + w2 = 1.0 - xyz @ xyz + if np.abs(w2) < np.abs(w2_thresh): w = 0 + elif w2 < 0: + raise ValueError(f'w2 should be positive, but is {w2:e}') else: w = np.sqrt(w2) return np.r_[w, xyz] From 0ecaa8e60999d37093b985918708a48d6df79536 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 13 Jan 2023 07:02:08 -0500 Subject: [PATCH 228/702] DOC: Update signs in qform result to satisfy doctests --- doc/source/nifti_images.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/nifti_images.rst b/doc/source/nifti_images.rst index 9318c062d1..39625e5c58 100644 --- a/doc/source/nifti_images.rst +++ b/doc/source/nifti_images.rst @@ -273,8 +273,8 @@ You can get and set the qform affine using the equivalent methods to those for the sform: ``get_qform()``, ``set_qform()``. >>> n1_header.get_qform(coded=True) -(array([[ -2. , 0. , 0. , 117.86], - [ -0. , 1.97, -0.36, -35.72], +(array([[ -2. , 0. , -0. , 117.86], + [ 0. , 1.97, -0.36, -35.72], [ 0. , 0.32, 2.17, -7.25], [ 0. , 0. , 0. , 1. 
]]), 1) From 3f30ab525f51fa5d62c0ab4c0e315f51bf132e90 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 13 Jan 2023 06:56:20 -0500 Subject: [PATCH 229/702] ENH: Set w2_thresh to positive values for clarity, update doc to indicate 3*eps --- nibabel/nifti1.py | 2 +- nibabel/quaternions.py | 8 ++++---- nibabel/tests/test_quaternions.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index a480afe49a..0c824ef6ad 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -688,7 +688,7 @@ class Nifti1Header(SpmAnalyzeHeader): single_magic = b'n+1' # Quaternion threshold near 0, based on float32 precision - quaternion_threshold = -np.finfo(np.float32).eps * 3 + quaternion_threshold = np.finfo(np.float32).eps * 3 def __init__(self, binaryblock=None, endianness=None, check=True, extensions=()): """Initialize header from binary data block and extensions""" diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index f549605f50..04c570c84b 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -44,8 +44,8 @@ def fillpositive(xyz, w2_thresh=None): w2_thresh : None or float, optional threshold to determine if w squared is non-zero. If None (default) then w2_thresh set equal to - ``-np.finfo(xyz.dtype).eps``, if possible, otherwise - ``-np.finfo(np.float64).eps`` + 3 * ``np.finfo(xyz.dtype).eps``, if possible, otherwise + 3 * ``np.finfo(np.float64).eps`` Returns ------- @@ -89,9 +89,9 @@ def fillpositive(xyz, w2_thresh=None): # If necessary, guess precision of input if w2_thresh is None: try: # trap errors for non-array, integer array - w2_thresh = -np.finfo(xyz.dtype).eps * 3 + w2_thresh = np.finfo(xyz.dtype).eps * 3 except (AttributeError, ValueError): - w2_thresh = -FLOAT_EPS * 3 + w2_thresh = FLOAT_EPS * 3 # Use maximum precision xyz = np.asarray(xyz, dtype=MAX_FLOAT) # Calculate w diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index a02c02564b..ebcb678e0b 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -108,7 +108,7 @@ def test_fillpositive_simulated_error(dtype): # as xyz with small error, and we want to recover the w of 0 # Permit 1 epsilon per value (default, but make explicit here) - w2_thresh = 3 * -np.finfo(dtype).eps + w2_thresh = 3 * np.finfo(dtype).eps pos_error = neg_error = False for _ in range(50): From 6b9b67655f4fe0957a5b10bd4fa5025d10eac323 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 15 Jan 2023 12:54:41 -0500 Subject: [PATCH 230/702] STY: Use norm(), matmul and list comprehensions --- nibabel/tests/test_quaternions.py | 45 +++++++++++++------------------ 1 file changed, 19 insertions(+), 26 deletions(-) diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index ebcb678e0b..aea1f7562c 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -29,34 +29,27 @@ def gen_vec(dtype): # Example rotations -eg_rots = [] -params = (-pi, pi, pi / 2) -zs = np.arange(*params) -ys = np.arange(*params) -xs = np.arange(*params) -for z in zs: - for y in ys: - for x in xs: - eg_rots.append(nea.euler2mat(z, y, x)) +eg_rots = [ + nea.euler2mat(z, y, x) + for z in np.arange(-pi, pi, pi / 2) + for y in np.arange(-pi, pi, pi / 2) + for x in np.arange(-pi, pi, pi / 2) +] + # Example quaternions (from rotations) -eg_quats = [] -for M in eg_rots: - eg_quats.append(nq.mat2quat(M)) +eg_quats = [nq.mat2quat(M) for M in eg_rots] # M, quaternion pairs eg_pairs = 
list(zip(eg_rots, eg_quats)) # Set of arbitrary unit quaternions -unit_quats = set() -params = range(-2, 3) -for w in params: - for x in params: - for y in params: - for z in params: - q = (w, x, y, z) - Nq = np.sqrt(np.dot(q, q)) - if not Nq == 0: - q = tuple([e / Nq for e in q]) - unit_quats.add(q) +unit_quats = set( + tuple(norm(np.r_[w, x, y, z])) + for w in range(-2, 3) + for x in range(-2, 3) + for y in range(-2, 3) + for z in range(-2, 3) + if (w, x, y, z) != (0, 0, 0, 0) +) def test_fillpos(): @@ -184,7 +177,7 @@ def test_norm(): def test_mult(M1, q1, M2, q2): # Test that quaternion * same as matrix * q21 = nq.mult(q2, q1) - assert_array_almost_equal, np.dot(M2, M1), nq.quat2mat(q21) + assert_array_almost_equal, M2 @ M1, nq.quat2mat(q21) @pytest.mark.parametrize('M, q', eg_pairs) @@ -205,7 +198,7 @@ def test_eye(): @pytest.mark.parametrize('M, q', eg_pairs) def test_qrotate(vec, M, q): vdash = nq.rotate_vector(vec, q) - vM = np.dot(M, vec) + vM = M @ vec assert_array_almost_equal(vdash, vM) @@ -238,6 +231,6 @@ def test_angle_axis(): nq.nearly_equivalent(q, q2) aa_mat = nq.angle_axis2mat(theta, vec) assert_array_almost_equal(aa_mat, M) - unit_vec = vec / np.sqrt(vec.dot(vec)) + unit_vec = norm(vec) aa_mat2 = nq.angle_axis2mat(theta, unit_vec, is_normalized=True) assert_array_almost_equal(aa_mat2, M) From aa4b017748603125c6b174713f0473a5119a8e2b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 4 Feb 2023 07:42:28 -0500 Subject: [PATCH 231/702] TEST: Check case that exceeds threshold Also remove explicit check that we randomly generated positive and negative errors. Failing this check is unlikely, but not a bug. --- nibabel/tests/test_quaternions.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index aea1f7562c..fff7c5e040 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -91,6 +91,15 @@ def test_fillpositive_plus_minus_epsilon(dtype): assert nq.fillpositive(plus)[0] == 0.0 assert nq.fillpositive(minus)[0] == 0.0 + # |(x, y, z)| > 1, no real solutions + plus = baseline * nptype(1 + 2 * np.finfo(dtype).eps) + with pytest.raises(ValueError): + nq.fillpositive(plus) + + # |(x, y, z)| < 1, two real solutions, we choose positive + minus = baseline * nptype(1 - 2 * np.finfo(dtype).eps) + assert nq.fillpositive(minus)[0] > 0.0 + @pytest.mark.parametrize('dtype', ('f4', 'f8')) def test_fillpositive_simulated_error(dtype): @@ -107,18 +116,7 @@ def test_fillpositive_simulated_error(dtype): for _ in range(50): xyz = norm(gen_vec(dtype)) - wxyz = nq.fillpositive(xyz, w2_thresh) - assert wxyz[0] == 0.0 - - # Verify that we exercise the threshold - magnitude = xyz @ xyz - if magnitude < 1: - pos_error = True - elif magnitude > 1: - neg_error = True - - assert pos_error, 'Did not encounter a case where 1 - |xyz| > 0' - assert neg_error, 'Did not encounter a case where 1 - |xyz| < 0' + assert nq.fillpositive(xyz, w2_thresh)[0] == 0.0 def test_conjugate(): From 2867397d5628a4b888a4c2ad896c8570eefc8a5e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 24 Jan 2023 09:22:33 -0500 Subject: [PATCH 232/702] TYP: Annotate unknown attributes for Recoders --- nibabel/volumeutils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 225062b2cb..a7dd428921 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -11,6 +11,7 @@ import gzip import sys +import typing as ty 
import warnings from collections import OrderedDict from functools import reduce @@ -121,6 +122,13 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): self.field1 = self.__dict__[fields[0]] self.add_codes(codes) + def __getattr__(self, key: str) -> ty.Mapping: + # By setting this, we let static analyzers know that dynamic attributes will + # be dict-like (Mapping). + # However, __getattr__ is called if looking up the field in __dict__ fails, + # so we only get here if the attribute is really missing. + raise AttributeError(f'{self.__class__.__name__!r} object has no attribute {key!r}') + def add_codes(self, code_syn_seqs): """Add codes to object From 5e388c6b13c975f92758986be760dcd8884df689 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 28 Jan 2023 16:36:50 -0500 Subject: [PATCH 233/702] TYP/RF: Annotate the Recoder and DtypeMapper classes --- nibabel/volumeutils.py | 56 ++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 30 deletions(-) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index a7dd428921..ca6106f15d 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -13,7 +13,6 @@ import sys import typing as ty import warnings -from collections import OrderedDict from functools import reduce from operator import mul from os.path import exists, splitext @@ -84,7 +83,14 @@ class Recoder: 2 """ - def __init__(self, codes, fields=('code',), map_maker=OrderedDict): + fields: tuple[str, ...] + + def __init__( + self, + codes: ty.Sequence[ty.Sequence[ty.Hashable]], + fields: ty.Sequence[str] = ('code',), + map_maker: type[ty.Mapping[ty.Hashable, ty.Hashable]] = dict, + ): """Create recoder object ``codes`` give a sequence of code, alias sequences @@ -122,14 +128,14 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): self.field1 = self.__dict__[fields[0]] self.add_codes(codes) - def __getattr__(self, key: str) -> ty.Mapping: + def __getattr__(self, key: str) -> ty.Mapping[ty.Hashable, ty.Hashable]: # By setting this, we let static analyzers know that dynamic attributes will # be dict-like (Mapping). # However, __getattr__ is called if looking up the field in __dict__ fails, # so we only get here if the attribute is really missing. raise AttributeError(f'{self.__class__.__name__!r} object has no attribute {key!r}') - def add_codes(self, code_syn_seqs): + def add_codes(self, code_syn_seqs: ty.Sequence[ty.Sequence[ty.Hashable]]) -> None: """Add codes to object Parameters @@ -163,7 +169,7 @@ def add_codes(self, code_syn_seqs): for field_ind, field_name in enumerate(self.fields): self.__dict__[field_name][alias] = code_syns[field_ind] - def __getitem__(self, key): + def __getitem__(self, key: ty.Hashable) -> ty.Hashable: """Return value from field1 dictionary (first column of values) Returns same value as ``obj.field1[key]`` and, with the @@ -176,13 +182,9 @@ def __getitem__(self, key): """ return self.field1[key] - def __contains__(self, key): + def __contains__(self, key: ty.Hashable) -> bool: """True if field1 in recoder contains `key`""" - try: - self.field1[key] - except KeyError: - return False - return True + return key in self.field1 def keys(self): """Return all available code and alias values @@ -198,7 +200,7 @@ def keys(self): """ return self.field1.keys() - def value_set(self, name=None): + def value_set(self, name: str | None = None) -> OrderedSet: """Return OrderedSet of possible returned values for column By default, the column is the first column. 
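
For illustration, a minimal sketch of the dict-like behavior these Recoder
annotations describe -- the two-field code table below is hypothetical, not
one of nibabel's real tables:

    from nibabel.volumeutils import Recoder

    # Hypothetical (code, label) rows; every entry in a row becomes an alias
    codes = ((1, 'scanner'), (2, 'aligned'))
    rc = Recoder(codes, fields=('code', 'label'))

    assert rc['aligned'] == 2        # __getitem__ looks up field1 (here 'code')
    assert 'scanner' in rc           # __contains__ is plain membership in field1
    assert rc.label[1] == 'scanner'  # field attributes are dict-like mappings
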
@@ -232,7 +234,7 @@ def value_set(self, name=None): endian_codes = Recoder(_endian_codes) -class DtypeMapper: +class DtypeMapper(dict[ty.Hashable, ty.Hashable]): """Specialized mapper for numpy dtypes We pass this mapper into the Recoder class to deal with numpy dtype @@ -250,26 +252,20 @@ class DtypeMapper: and return any matching values for the matching key. """ - def __init__(self): - self._dict = {} - self._dtype_keys = [] - - def keys(self): - return self._dict.keys() - - def values(self): - return self._dict.values() + def __init__(self) -> None: + super().__init__() + self._dtype_keys: list[np.dtype] = [] - def __setitem__(self, key, value): + def __setitem__(self, key: ty.Hashable, value: ty.Hashable) -> None: """Set item into mapping, checking for dtype keys Cache dtype keys for comparison test in __getitem__ """ - self._dict[key] = value - if hasattr(key, 'subdtype'): + super().__setitem__(key, value) + if isinstance(key, np.dtype): self._dtype_keys.append(key) - def __getitem__(self, key): + def __getitem__(self, key: ty.Hashable) -> ty.Hashable: """Get item from mapping, checking for dtype keys First do simple hash lookup, then check for a dtype key that has failed @@ -277,13 +273,13 @@ def __getitem__(self, key): to `key`. """ try: - return self._dict[key] + return super().__getitem__(key) except KeyError: pass - if hasattr(key, 'subdtype'): + if isinstance(key, np.dtype): for dt in self._dtype_keys: if key == dt: - return self._dict[dt] + return super().__getitem__(dt) raise KeyError(key) @@ -347,7 +343,7 @@ def pretty_mapping(mapping, getterfunc=None): return '\n'.join(out) -def make_dt_codes(codes_seqs): +def make_dt_codes(codes_seqs: ty.Sequence[ty.Sequence]) -> Recoder: """Create full dt codes Recoder instance from datatype codes Include created numpy dtype (from numpy type) and opposite endian From 389117bbed080b36916d3bbe6895568c33668486 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 28 Jan 2023 16:43:31 -0500 Subject: [PATCH 234/702] TYP: Annotate pretty_mapping --- nibabel/volumeutils.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index ca6106f15d..7ab55f6c60 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -14,7 +14,7 @@ import typing as ty import warnings from functools import reduce -from operator import mul +from operator import getitem, mul from os.path import exists, splitext import numpy as np @@ -26,6 +26,10 @@ pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') +if ty.TYPE_CHECKING: # pragma: no cover + K = ty.TypeVar('K') + V = ty.TypeVar('V') + sys_is_le = sys.byteorder == 'little' native_code = sys_is_le and '<' or '>' swapped_code = sys_is_le and '>' or '<' @@ -283,7 +287,10 @@ def __getitem__(self, key: ty.Hashable) -> ty.Hashable: raise KeyError(key) -def pretty_mapping(mapping, getterfunc=None): +def pretty_mapping( + mapping: ty.Mapping[K, V], + getterfunc: ty.Callable[[ty.Mapping[K, V], K], V] | None = None, +) -> str: """Make pretty string from mapping Adjusts text column to print values on basis of longest key. 
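
To make the formatting behavior concrete, a small usage sketch (the mapping
here is made up; pretty_mapping pads each key to the longest key's width):

    from nibabel.volumeutils import pretty_mapping

    print(pretty_mapping({'shape': (2, 3), 'dtype': 'int16'}))
    # shape : (2, 3)
    # dtype : int16
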
@@ -332,9 +339,8 @@ def pretty_mapping(mapping, getterfunc=None): longer_field : method string """ if getterfunc is None: - getterfunc = lambda obj, key: obj[key] - lens = [len(str(name)) for name in mapping] - mxlen = np.max(lens) + getterfunc = getitem + mxlen = max(len(str(name)) for name in mapping) fmt = '%%-%ds : %%s' % mxlen out = [] for name in mapping: From 8e1b9ac48c75a4cbacfcbfad3ad496d5f8896507 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 28 Jan 2023 17:32:46 -0500 Subject: [PATCH 235/702] TYP: Annotate volumeutils --- nibabel/volumeutils.py | 224 ++++++++++++++++++++++++++--------------- 1 file changed, 145 insertions(+), 79 deletions(-) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 7ab55f6c60..d61a41e679 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -10,9 +10,11 @@ from __future__ import annotations import gzip +import io import sys import typing as ty import warnings +from bz2 import BZ2File from functools import reduce from operator import getitem, mul from os.path import exists, splitext @@ -21,14 +23,22 @@ from .casting import OK_FLOATS, shared_range from .externals.oset import OrderedSet -from .openers import BZ2File, IndexedGzipFile +from .openers import IndexedGzipFile from .optpkg import optional_package -pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') - if ty.TYPE_CHECKING: # pragma: no cover + import numpy.typing as npt + import pyzstd + + HAVE_ZSTD = True + + Scalar = np.number | float + K = ty.TypeVar('K') V = ty.TypeVar('V') + DT = ty.TypeVar('DT', bound=np.generic) +else: + pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') sys_is_le = sys.byteorder == 'little' native_code = sys_is_le and '<' or '>' @@ -46,7 +56,7 @@ default_compresslevel = 1 #: file-like classes known to hold compressed data -COMPRESSED_FILE_LIKES: tuple[type, ...] = (gzip.GzipFile, BZ2File, IndexedGzipFile) +COMPRESSED_FILE_LIKES: tuple[type[io.IOBase], ...] = (gzip.GzipFile, BZ2File, IndexedGzipFile) # Enable .zst support if pyzstd installed. 
if HAVE_ZSTD: @@ -238,7 +248,7 @@ def value_set(self, name: str | None = None) -> OrderedSet: endian_codes = Recoder(_endian_codes) -class DtypeMapper(dict[ty.Hashable, ty.Hashable]): +class DtypeMapper(ty.Dict[ty.Hashable, ty.Hashable]): """Specialized mapper for numpy dtypes We pass this mapper into the Recoder class to deal with numpy dtype @@ -389,12 +399,19 @@ def make_dt_codes(codes_seqs: ty.Sequence[ty.Sequence]) -> Recoder: return Recoder(dt_codes, fields + ['dtype', 'sw_dtype'], DtypeMapper) -def _is_compressed_fobj(fobj): +def _is_compressed_fobj(fobj: io.IOBase) -> bool: """Return True if fobj represents a compressed data file-like object""" return isinstance(fobj, COMPRESSED_FILE_LIKES) -def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): +def array_from_file( + shape: tuple[int, ...], + in_dtype: np.dtype[DT], + infile: io.IOBase, + offset: int = 0, + order: ty.Literal['C', 'F'] = 'F', + mmap: bool | ty.Literal['c', 'r', 'r+'] = True, +) -> npt.NDArray[DT]: """Get array from file with specified shape, dtype and file offset Parameters @@ -439,24 +456,23 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): """ if mmap not in (True, False, 'c', 'r', 'r+'): raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") - if mmap is True: - mmap = 'c' in_dtype = np.dtype(in_dtype) # Get file-like object from Opener instance infile = getattr(infile, 'fobj', infile) if mmap and not _is_compressed_fobj(infile): + mode = 'c' if mmap is True else mmap try: # Try memmapping file on disk - return np.memmap(infile, in_dtype, mode=mmap, shape=shape, order=order, offset=offset) + return np.memmap(infile, in_dtype, mode=mode, shape=shape, order=order, offset=offset) # The error raised by memmap, for different file types, has # changed in different incarnations of the numpy routine except (AttributeError, TypeError, ValueError): pass if len(shape) == 0: - return np.array([]) + return np.array([], in_dtype) # Use reduce and mul to work around numpy integer overflow n_bytes = reduce(mul, shape) * in_dtype.itemsize if n_bytes == 0: - return np.array([]) + return np.array([], in_dtype) # Read data from file infile.seek(offset) if hasattr(infile, 'readinto'): @@ -472,7 +488,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): f'Expected {n_bytes} bytes, got {n_read} bytes from ' f"{getattr(infile, 'name', 'object')}\n - could the file be damaged?" 
) - arr = np.ndarray(shape, in_dtype, buffer=data_bytes, order=order) + arr: np.ndarray = np.ndarray(shape, in_dtype, buffer=data_bytes, order=order) if needs_copy: return arr.copy() arr.flags.writeable = True @@ -480,17 +496,17 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): def array_to_file( - data, - fileobj, - out_dtype=None, - offset=0, - intercept=0.0, - divslope=1.0, - mn=None, - mx=None, - order='F', - nan2zero=True, -): + data: npt.ArrayLike, + fileobj: io.IOBase, + out_dtype: np.dtype | None = None, + offset: int = 0, + intercept: Scalar = 0.0, + divslope: Scalar | None = 1.0, + mn: Scalar | None = None, + mx: Scalar | None = None, + order: ty.Literal['C', 'F'] = 'F', + nan2zero: bool = True, +) -> None: """Helper function for writing arrays to file objects Writes arrays as scaled by `intercept` and `divslope`, and clipped @@ -572,8 +588,7 @@ def array_to_file( True """ # Shield special case - div_none = divslope is None - if not np.all(np.isfinite((intercept, 1.0 if div_none else divslope))): + if not np.isfinite(np.array((intercept, 1.0 if divslope is None else divslope))).all(): raise ValueError('divslope and intercept must be finite') if divslope == 0: raise ValueError('divslope cannot be zero') @@ -585,7 +600,7 @@ def array_to_file( out_dtype = np.dtype(out_dtype) if offset is not None: seek_tell(fileobj, offset) - if div_none or (mn, mx) == (0, 0) or ((mn is not None and mx is not None) and mx < mn): + if divslope is None or (mn, mx) == (0, 0) or ((mn is not None and mx is not None) and mx < mn): write_zeros(fileobj, data.size * out_dtype.itemsize) return if order not in 'FC': @@ -717,17 +732,17 @@ def array_to_file( def _write_data( - data, - fileobj, - out_dtype, - order, - in_cast=None, - pre_clips=None, - inter=0.0, - slope=1.0, - post_clips=None, - nan_fill=None, -): + data: np.ndarray, + fileobj: io.IOBase, + out_dtype: np.dtype, + order: ty.Literal['C', 'F'], + in_cast: np.dtype | None = None, + pre_clips: tuple[Scalar | None, Scalar | None] | None = None, + inter: Scalar | np.ndarray = 0.0, + slope: Scalar | np.ndarray = 1.0, + post_clips: tuple[Scalar | None, Scalar | None] | None = None, + nan_fill: Scalar | None = None, +) -> None: """Write array `data` to `fileobj` as `out_dtype` type, layout `order` Does not modify `data` in-place. 
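
A short round-trip sketch of the annotated array_to_file / array_from_file
pair above, writing to an in-memory buffer (a plausible usage example, not
taken from nibabel's test suite):

    import io

    import numpy as np

    from nibabel.volumeutils import array_from_file, array_to_file

    arr = np.arange(6, dtype=np.int16).reshape(2, 3)
    fobj = io.BytesIO()
    # Write with default scaling (intercept=0, divslope=1) at offset 0
    array_to_file(arr, fobj, out_dtype=arr.dtype, offset=0)
    # Read back with the same shape, dtype and offset
    back = array_from_file(arr.shape, arr.dtype, fobj, offset=0)
    assert np.array_equal(arr, back)
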
@@ -784,7 +799,9 @@ def _write_data( fileobj.write(dslice.tobytes()) -def _dt_min_max(dtype_like, mn=None, mx=None): +def _dt_min_max( + dtype_like: npt.DTypeLike, mn: Scalar | None = None, mx: Scalar | None = None +) -> tuple[Scalar, Scalar]: dt = np.dtype(dtype_like) if dt.kind in 'fc': dt_mn, dt_mx = (-np.inf, np.inf) @@ -796,20 +813,25 @@ def _dt_min_max(dtype_like, mn=None, mx=None): return dt_mn if mn is None else mn, dt_mx if mx is None else mx -_CSIZE2FLOAT = {8: np.float32, 16: np.float64, 24: np.longdouble, 32: np.longdouble} +_CSIZE2FLOAT: dict[int, type[np.floating]] = { + 8: np.float32, + 16: np.float64, + 24: np.longdouble, + 32: np.longdouble, +} -def _matching_float(np_type): +def _matching_float(np_type: npt.DTypeLike) -> type[np.floating]: """Return floating point type matching `np_type`""" dtype = np.dtype(np_type) if dtype.kind not in 'cf': raise ValueError('Expecting float or complex type as input') - if dtype.kind in 'f': + if issubclass(dtype.type, np.floating): return dtype.type return _CSIZE2FLOAT[dtype.itemsize] -def write_zeros(fileobj, count, block_size=8194): +def write_zeros(fileobj: io.IOBase, count: int, block_size: int = 8194) -> None: """Write `count` zero bytes to `fileobj` Parameters @@ -829,7 +851,7 @@ def write_zeros(fileobj, count, block_size=8194): fileobj.write(b'\x00' * rem) -def seek_tell(fileobj, offset, write0=False): +def seek_tell(fileobj: io.IOBase, offset: int, write0: bool = False) -> None: """Seek in `fileobj` or check we're in the right place already Parameters @@ -859,7 +881,11 @@ def seek_tell(fileobj, offset, write0=False): assert fileobj.tell() == offset -def apply_read_scaling(arr, slope=None, inter=None): +def apply_read_scaling( + arr: np.ndarray, + slope: Scalar | None = None, + inter: Scalar | None = None, +) -> np.ndarray: """Apply scaling in `slope` and `inter` to array `arr` This is for loading the array from a file (as opposed to the reverse @@ -898,23 +924,28 @@ def apply_read_scaling(arr, slope=None, inter=None): return arr shape = arr.shape # Force float / float upcasting by promoting to arrays - arr, slope, inter = (np.atleast_1d(v) for v in (arr, slope, inter)) + slope1d, inter1d = (np.atleast_1d(v) for v in (slope, inter)) + arr = np.atleast_1d(arr) if arr.dtype.kind in 'iu': # int to float; get enough precision to avoid infs # Find floating point type for which scaling does not overflow, # starting at given type - default = slope.dtype.type if slope.dtype.kind == 'f' else np.float64 - ftype = int_scinter_ftype(arr.dtype, slope, inter, default) - slope = slope.astype(ftype) - inter = inter.astype(ftype) - if slope != 1.0: - arr = arr * slope - if inter != 0.0: - arr = arr + inter + default = slope1d.dtype.type if slope1d.dtype.kind == 'f' else np.float64 + ftype = int_scinter_ftype(arr.dtype, slope1d, inter1d, default) + slope1d = slope1d.astype(ftype) + inter1d = inter1d.astype(ftype) + if slope1d != 1.0: + arr = arr * slope1d + if inter1d != 0.0: + arr = arr + inter1d return arr.reshape(shape) -def working_type(in_type, slope=1.0, inter=0.0): +def working_type( + in_type: npt.DTypeLike, + slope: npt.ArrayLike = 1.0, + inter: npt.ArrayLike = 0.0, +) -> type[np.number]: """Return array type from applying `slope`, `inter` to array of `in_type` Numpy type that results from an array of type `in_type` being combined with @@ -945,19 +976,22 @@ def working_type(in_type, slope=1.0, inter=0.0): `in_type`. 
""" val = np.array([1], dtype=in_type) - slope = np.array(slope) - inter = np.array(inter) # Don't use real values to avoid overflows. Promote to 1D to avoid scalar # casting rules. Don't use ones_like, zeros_like because of a bug in numpy # <= 1.5.1 in converting complex192 / complex256 scalars. if inter != 0: - val = val + np.array([0], dtype=inter.dtype) + val = val + np.array([0], dtype=np.array(inter).dtype) if slope != 1: - val = val / np.array([1], dtype=slope.dtype) + val = val / np.array([1], dtype=np.array(slope).dtype) return val.dtype.type -def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32): +def int_scinter_ftype( + ifmt: type[np.integer], + slope: npt.ArrayLike = 1.0, + inter: npt.ArrayLike = 0.0, + default: type[np.floating] = np.float32, +) -> type[np.floating]: """float type containing int type `ifmt` * `slope` + `inter` Return float type that can represent the max and the min of the `ifmt` type @@ -1009,7 +1043,12 @@ def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32): raise ValueError('Overflow using highest floating point type') -def best_write_scale_ftype(arr, slope=1.0, inter=0.0, default=np.float32): +def best_write_scale_ftype( + arr: np.ndarray, + slope: npt.ArrayLike = 1.0, + inter: npt.ArrayLike = 0.0, + default: type[np.number] = np.float32, +) -> type[np.floating]: """Smallest float type to contain range of ``arr`` after scaling Scaling that will be applied to ``arr`` is ``(arr - inter) / slope``. @@ -1073,7 +1112,11 @@ def best_write_scale_ftype(arr, slope=1.0, inter=0.0, default=np.float32): return OK_FLOATS[-1] -def better_float_of(first, second, default=np.float32): +def better_float_of( + first: npt.DTypeLike, + second: npt.DTypeLike, + default: type[np.floating] = np.float32, +) -> type[np.floating]: """Return more capable float type of `first` and `second` Return `default` if neither of `first` or `second` is a float @@ -1107,19 +1150,22 @@ def better_float_of(first, second, default=np.float32): first = np.dtype(first) second = np.dtype(second) default = np.dtype(default).type - kinds = (first.kind, second.kind) - if 'f' not in kinds: - return default - if kinds == ('f', 'f'): - if first.itemsize >= second.itemsize: - return first.type - return second.type - if first.kind == 'f': + if issubclass(first.type, np.floating): + if issubclass(second.type, np.floating) and first.itemsize < second.itemsize: + return second.type return first.type - return second.type + if issubclass(second.type, np.floating): + return second.type + return default -def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', default=np.float32): +def _ftype4scaled_finite( + tst_arr: np.ndarray, + slope: npt.ArrayLike, + inter: npt.ArrayLike, + direction: ty.Literal['read', 'write'] = 'read', + default: type[np.floating] = np.float32, +) -> type[np.floating]: """Smallest float type for scaling of `tst_arr` that does not overflow""" assert direction in ('read', 'write') if default not in OK_FLOATS and default is np.longdouble: @@ -1130,7 +1176,6 @@ def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', default=np.flo tst_arr = np.atleast_1d(tst_arr) slope = np.atleast_1d(slope) inter = np.atleast_1d(inter) - overflow_filter = ('error', '.*overflow.*', RuntimeWarning) for ftype in OK_FLOATS[def_ind:]: tst_trans = tst_arr.copy() slope = slope.astype(ftype) @@ -1138,7 +1183,7 @@ def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', default=np.flo try: with warnings.catch_warnings(): # Error on overflows to short circuit 
the logic - warnings.filterwarnings(*overflow_filter) + warnings.filterwarnings('error', '.*overflow.*', RuntimeWarning) if direction == 'read': # as in reading of image from disk if slope != 1.0: tst_trans = tst_trans * slope @@ -1157,7 +1202,22 @@ def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', default=np.flo raise ValueError('Overflow using highest floating point type') -def finite_range(arr, check_nan=False): +@ty.overload +def finite_range( + arr: npt.ArrayLike, check_nan: ty.Literal[False] = False +) -> tuple[Scalar, Scalar]: + ... # pragma: no cover + + +@ty.overload +def finite_range(arr: npt.ArrayLike, check_nan: ty.Literal[True]) -> tuple[Scalar, Scalar, bool]: + ... # pragma: no cover + + +def finite_range( + arr: npt.ArrayLike, + check_nan: bool = False, +) -> tuple[Scalar, Scalar, bool] | tuple[Scalar, Scalar]: """Get range (min, max) or range and flag (min, max, has_nan) from `arr` Parameters @@ -1205,7 +1265,9 @@ def finite_range(arr, check_nan=False): """ arr = np.asarray(arr) if arr.size == 0: - return (np.inf, -np.inf) + (False,) * check_nan + if check_nan: + return (np.inf, -np.inf, False) + return (np.inf, -np.inf) # Resort array to slowest->fastest memory change indices stride_order = np.argsort(arr.strides)[::-1] sarr = arr.transpose(stride_order) @@ -1253,7 +1315,11 @@ def finite_range(arr, check_nan=False): return np.nanmin(mins), np.nanmax(maxes) -def shape_zoom_affine(shape, zooms, x_flip=True): +def shape_zoom_affine( + shape: ty.Sequence[int] | np.ndarray, + zooms: ty.Sequence[float] | np.ndarray, + x_flip: bool = True, +) -> np.ndarray: """Get affine implied by given shape and zooms We get the translations from the center of the image (implied by @@ -1315,7 +1381,7 @@ def shape_zoom_affine(shape, zooms, x_flip=True): return aff -def rec2dict(rec): +def rec2dict(rec: np.ndarray) -> dict[str, np.generic | np.ndarray]: """Convert recarray to dictionary Also converts scalar values to scalars @@ -1348,7 +1414,7 @@ def rec2dict(rec): return dct -def fname_ext_ul_case(fname): +def fname_ext_ul_case(fname: str) -> str: """`fname` with ext changed to upper / lower case if file exists Check for existence of `fname`. If it does exist, return unmodified. If From 92c90ae3525dce2da7153538eb12d2b55d8995a0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 29 Jan 2023 19:38:08 -0500 Subject: [PATCH 236/702] MNT: Add pyzstd to typing requirements --- pyproject.toml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e002f6d053..83556a6b84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,11 +74,12 @@ test = [ ] typing = [ "mypy", + "importlib_resources", + "pydicom", "pytest", + "pyzstd", "types-setuptools", "types-Pillow", - "pydicom", - "importlib_resources", ] zstd = ["pyzstd >= 0.14.3"] From 0c813bf0a8359899eb5b2d4de8ba83d7ed62e497 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 8 Feb 2023 10:04:40 +0200 Subject: [PATCH 237/702] DOC: Added badges and organized in table format --- README.rst | 90 +++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 69 insertions(+), 21 deletions(-) diff --git a/README.rst b/README.rst index 3378e751c2..e8e4d6c3b7 100644 --- a/README.rst +++ b/README.rst @@ -1,29 +1,77 @@ .. -*- rest -*- .. vim:syntax=rst -.. image:: https://codecov.io/gh/nipy/nibabel/branch/master/graph/badge.svg - :target: https://codecov.io/gh/nipy/nibabel - -.. 
image:: https://zenodo.org/badge/DOI/10.5281/zenodo.591597.svg - :target: https://doi.org/10.5281/zenodo.591597 - -.. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29 - :target: https://repology.org/project/python:nibabel/versions - :alt: Arch (AUR) - -.. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable - :target: https://repology.org/project/nibabel/versions - :alt: Debian Unstable package - -.. image:: https://repology.org/badge/version-for-repo/gentoo_ovl_science/nibabel.svg?header=Gentoo%20%28%3A%3Ascience%29 - :target: https://repology.org/project/nibabel/versions - :alt: Gentoo (::science) +.. Following contents should be from LONG_DESCRIPTION in nibabel/info.py -.. image:: https://repology.org/badge/version-for-repo/nix_unstable/python:nibabel.svg?header=nixpkgs%20unstable - :target: https://repology.org/project/python:nibabel/versions - :alt: nixpkgs unstable -.. Following contents should be from LONG_DESCRIPTION in nibabel/info.py +.. list-table:: + :widths: 20 80 + :header-rows: 0 + + * - Code + - + .. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + :alt: code style: black + .. image:: https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336 + :target: https://pycqa.github.io/isort/ + :alt: imports: isort + .. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white + :target: https://github.com/pre-commit/pre-commit + :alt: pre-commit + .. image:: https://codecov.io/gh/nipy/nibabel/branch/master/graph/badge.svg + :target: https://codecov.io/gh/nipy/nibabel + :alt: codecov badge + .. image:: https://img.shields.io/librariesio/github/nipy/nibabel + :target: https://libraries.io/github/nipy/nibabel + :alt: Libraries.io dependency status for GitHub repo + * - Status + - + .. image:: https://github.com/nipy/nibabel/actions/workflows/stable.yml/badge.svg + :target: https://github.com/nipy/nibabel/actions/workflows/stable.yml + :alt: stable tests + .. image:: https://github.com/nipy/nibabel/actions/workflows/pages/pages-build-deployment/badge.svg + :target: https://github.com/nipy/nibabel/actions/workflows/pages/pages-build-deployment + :alt: documentation build + * - Packaging + - + .. image:: https://img.shields.io/pypi/v/nibabel.svg + :target: https://pypi.python.org/pypi/nibabel/ + :alt: PyPI version + .. image:: https://img.shields.io/pypi/format/nibabel.svg + :target: https://pypi.org/project/nibabel + :alt: PyPI Format + .. image:: https://img.shields.io/pypi/pyversions/nibabel.svg + :target: https://pypi.python.org/pypi/nibabel/ + :alt: PyPI - Python Version + .. image:: https://img.shields.io/pypi/implementation/nibabel.svg + :target: https://pypi.python.org/pypi/nibabel/ + :alt: PyPI - Implementation + .. image:: https://img.shields.io/pypi/dm/nibabel.svg + :target: https://pypistats.org/packages/nibabel + :alt: PyPI - Downloads + * - Distribution + - + .. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29 + :target: https://repology.org/project/python:nibabel/versions + :alt: Arch (AUR) + .. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable + :target: https://repology.org/project/nibabel/versions + :alt: Debian Unstable package + .. 
image:: https://repology.org/badge/version-for-repo/gentoo_ovl_science/nibabel.svg?header=Gentoo%20%28%3A%3Ascience%29 + :target: https://repology.org/project/nibabel/versions + :alt: Gentoo (::science) + .. image:: https://repology.org/badge/version-for-repo/nix_unstable/python:nibabel.svg?header=nixpkgs%20unstable + :target: https://repology.org/project/python:nibabel/versions + :alt: nixpkgs unstable + * - License & DOI + - + .. image:: https://img.shields.io/pypi/l/nibabel.svg + :target: https://github.com/nipy/nibabel/blob/master/COPYING + :alt: License + .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.591597.svg + :target: https://doi.org/10.5281/zenodo.591597 + :alt: Zenodo DOI ======= From acc41166ed693bbd090a916b9bd55094b6d29326 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 8 Feb 2023 12:36:36 +0200 Subject: [PATCH 238/702] DOC: Organized existing contents --- README.rst | 142 +++++++++++++++++++++++++---------------------------- 1 file changed, 66 insertions(+), 76 deletions(-) diff --git a/README.rst b/README.rst index e8e4d6c3b7..26b3446629 100644 --- a/README.rst +++ b/README.rst @@ -1,8 +1,37 @@ .. -*- rest -*- .. vim:syntax=rst -.. Following contents should be from LONG_DESCRIPTION in nibabel/info.py +.. Following contents should be copied from LONG_DESCRIPTION in NiBabel/info.py +======= +NiBabel +======= + +Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), +GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC +In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, +and provides some limited support for DICOM_. + +NiBabel's API gives full or selective access to header information (metadata) and access to the image +data is made available via NumPy arrays. For more information, see NiBabel's `documentation site`_ +and `API reference`_. + +.. _API reference: https://nipy.org/nibabel/api.html +.. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes +.. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm +.. _CIFTI-2: https://www.nitrc.org/projects/cifti/ +.. _DICOM: http://medical.nema.org/ +.. _documentation site: http://nipy.org/NiBabel +.. _ECAT: http://xmedcon.sourceforge.net/Docs/Ecat +.. _Freesurfer: https://surfer.nmr.mgh.harvard.edu +.. _GIFTI: https://www.nitrc.org/projects/gifti +.. _MGH: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat +.. _MINC1: + https://en.wikibooks.org/wiki/MINC/Reference/MINC1_File_Format_Reference +.. _MINC2: + https://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference +.. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ +.. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ .. list-table:: :widths: 20 80 @@ -19,27 +48,29 @@ .. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white :target: https://github.com/pre-commit/pre-commit :alt: pre-commit - .. image:: https://codecov.io/gh/nipy/nibabel/branch/master/graph/badge.svg - :target: https://codecov.io/gh/nipy/nibabel + .. image:: https://codecov.io/gh/nipy/NiBabel/branch/master/graph/badge.svg + :target: https://codecov.io/gh/nipy/NiBabel :alt: codecov badge - .. image:: https://img.shields.io/librariesio/github/nipy/nibabel - :target: https://libraries.io/github/nipy/nibabel + .. 
image:: https://img.shields.io/librariesio/github/nipy/NiBabel + :target: https://libraries.io/github/nipy/NiBabel :alt: Libraries.io dependency status for GitHub repo + * - Status - - .. image:: https://github.com/nipy/nibabel/actions/workflows/stable.yml/badge.svg - :target: https://github.com/nipy/nibabel/actions/workflows/stable.yml + .. image:: https://github.com/nipy/NiBabel/actions/workflows/stable.yml/badge.svg + :target: https://github.com/nipy/NiBabel/actions/workflows/stable.yml :alt: stable tests - .. image:: https://github.com/nipy/nibabel/actions/workflows/pages/pages-build-deployment/badge.svg - :target: https://github.com/nipy/nibabel/actions/workflows/pages/pages-build-deployment + .. image:: https://github.com/nipy/NiBabel/actions/workflows/pages/pages-build-deployment/badge.svg + :target: https://github.com/nipy/NiBabel/actions/workflows/pages/pages-build-deployment :alt: documentation build + * - Packaging - .. image:: https://img.shields.io/pypi/v/nibabel.svg :target: https://pypi.python.org/pypi/nibabel/ :alt: PyPI version .. image:: https://img.shields.io/pypi/format/nibabel.svg - :target: https://pypi.org/project/nibabel + :target: https://pypi.org/project/nibabel/ :alt: PyPI Format .. image:: https://img.shields.io/pypi/pyversions/nibabel.svg :target: https://pypi.python.org/pypi/nibabel/ @@ -48,8 +79,9 @@ :target: https://pypi.python.org/pypi/nibabel/ :alt: PyPI - Implementation .. image:: https://img.shields.io/pypi/dm/nibabel.svg - :target: https://pypistats.org/packages/nibabel + :target: https://pypistats.org/packages/nibabel/ :alt: PyPI - Downloads + * - Distribution - .. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29 @@ -73,86 +105,44 @@ :target: https://doi.org/10.5281/zenodo.591597 :alt: Zenodo DOI +Installation +============ -======= -NiBabel -======= +To install NiBabel's `current release`_ with ``pip``, run:: -Read / write access to some common neuroimaging file formats + pip install nibabel -This package provides read +/- write access to some common medical and -neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), -GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, MGH_ and -ECAT_ as well as Philips PAR/REC. We can read and write FreeSurfer_ geometry, -annotation and morphometry files. There is some very limited support for -DICOM_. NiBabel is the successor of PyNIfTI_. +To install the latest development version, run:: -.. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm -.. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes -.. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ -.. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ -.. _CIFTI-2: https://www.nitrc.org/projects/cifti/ -.. _MINC1: - https://en.wikibooks.org/wiki/MINC/Reference/MINC1_File_Format_Reference -.. _MINC2: - https://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference -.. _PyNIfTI: http://niftilib.sourceforge.net/pynifti/ -.. _GIFTI: https://www.nitrc.org/projects/gifti -.. _MGH: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat -.. _ECAT: http://xmedcon.sourceforge.net/Docs/Ecat -.. _Freesurfer: https://surfer.nmr.mgh.harvard.edu -.. _DICOM: http://medical.nema.org/ - -The various image format classes give full or selective access to header -(meta) information and access to the image data is made available via NumPy -arrays. 
+ pip install git+https://github.com/nipy/nibabel -Website -======= +For more information on previous releases, see the `release archive`_. -Current documentation on nibabel can always be found at the `NIPY nibabel -website `_. +.. _current release: https://pypi.python.org/pypi/NiBabel +.. _release archive: https://github.com/nipy/NiBabel/releases -Mailing Lists -============= +Mailing List +============ Please send any questions or suggestions to the `neuroimaging mailing list `_. -Code -==== - -Install nibabel with:: - - pip install nibabel - -You may also be interested in: - -* the `nibabel code repository`_ on Github; -* documentation_ for all releases and current development tree; -* download the `current release`_ from pypi; -* download `current development version`_ as a zip file; -* downloads of all `available releases`_. - -.. _nibabel code repository: https://github.com/nipy/nibabel -.. _Documentation: http://nipy.org/nibabel -.. _current release: https://pypi.python.org/pypi/nibabel -.. _current development version: https://github.com/nipy/nibabel/archive/master.zip -.. _available releases: https://github.com/nipy/nibabel/releases - License ======= -Nibabel is licensed under the terms of the MIT license. Some code included -with nibabel is licensed under the BSD license. Please see the COPYING file -in the nibabel distribution. +NiBabel is licensed under the terms of the `MIT license`_. Some code included +with NiBabel is licensed under the `BSD license`_. For more information, +please see the COPYING_ file. -Citing nibabel -============== +.. _BSD license: https://opensource.org/licenses/BSD-3-Clause +.. _COPYING: https://github.com/nipy/nibabel/blob/master/COPYING +.. _MIT license: https://github.com/nipy/nibabel/blob/master/COPYING#nibabel -Please see the `available releases`_ for the release of nibabel that you are -using. Recent releases have a Zenodo_ `Digital Object Identifier`_ badge at -the top of the release notes. Click on the badge for more information. +Citation +======== + +Recent NiBabel releases have a Zenodo_ `Digital Object Identifier`_ (DOI) badge at +the top of the release notes. Click on the badge for more information. -.. _zenodo: https://zenodo.org .. _Digital Object Identifier: https://en.wikipedia.org/wiki/Digital_object_identifier +.. _zenodo: https://zenodo.org From 69df8a53200c61b0e94e44ba749af8dac596109e Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 8 Feb 2023 13:00:01 +0200 Subject: [PATCH 239/702] DOC: Replaced title with documentation site logo --- README.rst | 8 ++++---- doc/pics/logo.png | Bin 0 -> 35515 bytes 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 doc/pics/logo.png diff --git a/README.rst b/README.rst index 26b3446629..daf8118012 100644 --- a/README.rst +++ b/README.rst @@ -3,16 +3,16 @@ .. Following contents should be copied from LONG_DESCRIPTION in NiBabel/info.py -======= -NiBabel -======= +.. image:: doc/pics/logo.png + :target: https://nipy.org/nibabel + :alt: NiBabel logo Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, and provides some limited support for DICOM_. 
-NiBabel's API gives full or selective access to header information (metadata) and access to the image
+NiBabel's API gives full or selective access to header information (metadata), and image
 data is made available via NumPy arrays. For more information, see NiBabel's `documentation site`_
 and `API reference`_.

diff --git a/doc/pics/logo.png b/doc/pics/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..570d38f4769963691b7a5e36ad7efcaa921c70ca
GIT binary patch
literal 35515
[base85-encoded PNG data for doc/pics/logo.png elided]

literal 0
HcmV?d00001

From 3cb8ee026017d0caddc70aad095188512b09e9df Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Sat, 11 Feb 2023 16:46:02 -0500
Subject: [PATCH 240/702] MNT: Add py.typed to module root

---
 nibabel/py.typed | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 nibabel/py.typed

diff --git a/nibabel/py.typed b/nibabel/py.typed
new file mode 100644
index 0000000000..e69de29bb2

From 08e4256607ecff5b90b59a24a04d41c46595a708 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Sat, 11 Feb 2023 16:46:39 -0500
Subject: [PATCH 241/702] MNT: Ignore nibabel-data when building sdists

---
 pyproject.toml | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 83556a6b84..f944f8e685 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -84,7 +84,11 @@ typing = [
 zstd = ["pyzstd >= 0.14.3"]
 
 [tool.hatch.build.targets.sdist]
-exclude = [".git_archival.txt"]
+exclude = [
+  ".git_archival.txt",
+  # Submodules with large files; if we don't want them in the repo...
+  "nibabel-data/",
+]
 
 [tool.hatch.build.targets.wheel]
 packages = ["nibabel", "nisext"]

From 90bcd832404c73d4d4b075ca5c5000b7534eb8cb Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Sat, 28 Jan 2023 17:17:33 -0500
Subject: [PATCH 242/702] MNT: Update pre-commit hooks

STY: Installation issues with isort
TYP: Ensure better (but slower) coverage for pre-commit mypy
---
 .pre-commit-config.yaml | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index addd5f5634..3a66205335 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
     hooks:
       - id: blue
   - repo: https://github.com/pycqa/isort
-    rev: 5.11.2
+    rev: 5.12.0
     hooks:
       - id: isort
   - repo: https://github.com/pycqa/flake8
@@ -35,5 +35,7 @@ repos:
           - types-setuptools
          - types-Pillow
           - pydicom
-        # Sync with tool.mypy['exclude']
-        exclude: "^(doc|nisext|tools)/|.*/tests/"
+          - numpy
+          - pyzstd
+        args: ["nibabel"]
+        pass_filenames: false

From 7a7385ef036106710c7d3f75fa3c8a5364324658 Mon Sep 17 00:00:00 2001
From: Michiel Cottaar
Date: Sun, 12 Feb 2023 10:36:27 -0500
Subject: [PATCH 243/702] BF: Support ragged voxel arrays in ParcelsAxis

In the past we used `np.asanyarray(voxels)`, which would produce an
array with dtype="object" if provided with a ragged array. This no
longer works in numpy 1.24.
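
For illustration, a minimal sketch of the failure and of the workaround
adopted below (the variable names and exact error text are illustrative
only, not part of the patch):

    import numpy as np

    # A "ragged" sequence: member arrays with different shapes
    ragged = [np.ones((3, 2), dtype=int), np.zeros((5, 2), dtype=int)]

    # numpy < 1.24: returns an object array, with a VisibleDeprecationWarning
    # numpy >= 1.24: raises ValueError (inhomogeneous shape)
    # np.asanyarray(ragged)

    # Version-independent replacement: allocate the object array up
    # front, then fill it element by element
    voxels = np.empty(len(ragged), dtype=object)
    for idx, vox in enumerate(ragged):
        voxels[idx] = vox
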
Backport of gh-1194 Co-authored-by: Chris Markiewicz --- nibabel/cifti2/cifti2_axes.py | 11 +++-------- nibabel/cifti2/tests/test_axes.py | 23 ++++++++++++++++++++++- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 3142c8362b..0c75190f80 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -775,14 +775,9 @@ def __init__(self, name, voxels, vertices, affine=None, volume_shape=None, nvert maps names of surface elements to integers (not needed for volumetric CIFTI-2 files) """ self.name = np.asanyarray(name, dtype='U') - as_array = np.asanyarray(voxels) - if as_array.ndim == 1: - voxels = as_array.astype('object') - else: - voxels = np.empty(len(voxels), dtype='object') - for idx in range(len(voxels)): - voxels[idx] = as_array[idx] - self.voxels = np.asanyarray(voxels, dtype='object') + self.voxels = np.empty(len(voxels), dtype='object') + for idx, vox in enumerate(voxels): + self.voxels[idx] = vox self.vertices = np.asanyarray(vertices, dtype='object') self.affine = np.asanyarray(affine) if affine is not None else None self.volume_shape = volume_shape diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index 4cabd188b1..245964502f 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -494,13 +494,34 @@ def test_parcels(): assert prc != prc_other # test direct initialisation - axes.ParcelsAxis( + test_parcel = axes.ParcelsAxis( voxels=[np.ones((3, 2), dtype=int)], vertices=[{}], name=['single_voxel'], affine=np.eye(4), volume_shape=(2, 3, 4), ) + assert len(test_parcel) == 1 + + # test direct initialisation with multiple parcels + test_parcel = axes.ParcelsAxis( + voxels=[np.ones((3, 2), dtype=int), np.zeros((3, 2), dtype=int)], + vertices=[{}, {}], + name=['first_parcel', 'second_parcel'], + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) + assert len(test_parcel) == 2 + + # test direct initialisation with ragged voxel/vertices array + test_parcel = axes.ParcelsAxis( + voxels=[np.ones((3, 2), dtype=int), np.zeros((5, 2), dtype=int)], + vertices=[{}, {}], + name=['first_parcel', 'second_parcel'], + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) + assert len(test_parcel) == 2 with pytest.raises(ValueError): axes.ParcelsAxis( From 91cf8d6cca316a86b72014cf24f4c3d82b536346 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 11 Feb 2023 16:46:02 -0500 Subject: [PATCH 244/702] MNT: Add py.typed to module root --- nibabel/py.typed | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 nibabel/py.typed diff --git a/nibabel/py.typed b/nibabel/py.typed new file mode 100644 index 0000000000..e69de29bb2 From 6464fb4c71beedce8f47d83987d4a255f14eb0c0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 11 Feb 2023 16:46:39 -0500 Subject: [PATCH 245/702] MNT: Ignore nibabel-data when building sdists --- pyproject.toml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6d44c607ed..65104ff137 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,7 +72,11 @@ typing = ["mypy", "pytest", "types-setuptools", "types-Pillow", "pydicom"] zstd = ["pyzstd >= 0.14.3"] [tool.hatch.build.targets.sdist] -exclude = [".git_archival.txt"] +exclude = [ + ".git_archival.txt", + # Submodules with large files; if we don't want them in the repo... 
+ "nibabel-data/", +] [tool.hatch.build.targets.wheel] packages = ["nibabel", "nisext"] From c0be34695ebf04521a92190e62ea7f477ef7980b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 12 Feb 2023 10:52:43 -0500 Subject: [PATCH 246/702] DOC: 5.0.1 changelog --- Changelog | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/Changelog b/Changelog index 2eec48fa6b..69e55d1a9c 100644 --- a/Changelog +++ b/Changelog @@ -25,6 +25,26 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. +5.0.1 (Sunday 12 February 2023) +=============================== + +Bug-fix release in the 5.0.x series. + +Bug fixes +--------- +* Support ragged voxel arrays in + :class:`~nibabel.cifti2.cifti2_axes.ParcelsAxis` (pr/1194) (Michiel Cottaar, + reviewed by CM) +* Return to cwd on exception in :class:`~nibabel.tmpdirs.InTemporaryDirectory` + (pr/1184) (CM) + +Maintenance +----------- +* Add ``py.typed`` to module root to enable use of types in downstream + projects (CM, reviewed by Fernando Pérez-Garcia) +* Cache git-archive separately from Python packages in GitHub Actions + (pr/1186) (CM, reviewed by Zvi Baratz) + 5.0.0 (Monday 9 January 2023) ============================= From af1849bc6a6a84be2df12459727b1eb2bdee1304 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:01:16 +0200 Subject: [PATCH 247/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index daf8118012..35a16d3ee1 100644 --- a/README.rst +++ b/README.rst @@ -39,9 +39,9 @@ and `API reference`_. * - Code - - .. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black - :alt: code style: black + .. image:: https://img.shields.io/badge/code%20style-blue-blue.svg + :target: https://blue.readthedocs.io/en/latest/ + :alt: code style: blue .. image:: https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336 :target: https://pycqa.github.io/isort/ :alt: imports: isort From b1a053dc073a46e6a9ea965a95da6c2f1bbfbd31 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:01:47 +0200 Subject: [PATCH 248/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 35a16d3ee1..2acb0d4b42 100644 --- a/README.rst +++ b/README.rst @@ -42,7 +42,7 @@ and `API reference`_. .. image:: https://img.shields.io/badge/code%20style-blue-blue.svg :target: https://blue.readthedocs.io/en/latest/ :alt: code style: blue - .. image:: https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336 + .. image:: https://img.shields.io/badge/imports-isort-1674b1 :target: https://pycqa.github.io/isort/ :alt: imports: isort .. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white From 0595cc7800f689c8c30038bfd423a7f4f9f84a35 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:02:42 +0200 Subject: [PATCH 249/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.rst b/README.rst index 2acb0d4b42..e3cc523811 100644 --- a/README.rst +++ b/README.rst @@ -51,9 +51,6 @@ and `API reference`_. .. 
image:: https://codecov.io/gh/nipy/NiBabel/branch/master/graph/badge.svg :target: https://codecov.io/gh/nipy/NiBabel :alt: codecov badge - .. image:: https://img.shields.io/librariesio/github/nipy/NiBabel - :target: https://libraries.io/github/nipy/NiBabel - :alt: Libraries.io dependency status for GitHub repo * - Status - From e5ad94de2fe6b6bcc216ded911fc8261c595a538 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:03:10 +0200 Subject: [PATCH 250/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.rst b/README.rst index e3cc523811..f3e1b7b58f 100644 --- a/README.rst +++ b/README.rst @@ -57,9 +57,6 @@ and `API reference`_. .. image:: https://github.com/nipy/NiBabel/actions/workflows/stable.yml/badge.svg :target: https://github.com/nipy/NiBabel/actions/workflows/stable.yml :alt: stable tests - .. image:: https://github.com/nipy/NiBabel/actions/workflows/pages/pages-build-deployment/badge.svg - :target: https://github.com/nipy/NiBabel/actions/workflows/pages/pages-build-deployment - :alt: documentation build * - Packaging - From 8ed90a97435ee00f05eea1dc756fb866398fc36c Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:03:54 +0200 Subject: [PATCH 251/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.rst b/README.rst index f3e1b7b58f..0ca052e6c6 100644 --- a/README.rst +++ b/README.rst @@ -63,9 +63,6 @@ and `API reference`_. .. image:: https://img.shields.io/pypi/v/nibabel.svg :target: https://pypi.python.org/pypi/nibabel/ :alt: PyPI version - .. image:: https://img.shields.io/pypi/format/nibabel.svg - :target: https://pypi.org/project/nibabel/ - :alt: PyPI Format .. image:: https://img.shields.io/pypi/pyversions/nibabel.svg :target: https://pypi.python.org/pypi/nibabel/ :alt: PyPI - Python Version From 4298ccb05d2b6bf62fb75bf0b5b36de46c49c346 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:04:03 +0200 Subject: [PATCH 252/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 0ca052e6c6..ce39b539d0 100644 --- a/README.rst +++ b/README.rst @@ -8,7 +8,7 @@ :alt: NiBabel logo Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), -GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC +GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, and provides some limited support for DICOM_. From b0edd1a2d51b956b13c9c61dd964d11362f64c3e Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:04:38 +0200 Subject: [PATCH 253/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.rst b/README.rst index ce39b539d0..bea66dd1d7 100644 --- a/README.rst +++ b/README.rst @@ -66,9 +66,6 @@ and `API reference`_. .. image:: https://img.shields.io/pypi/pyversions/nibabel.svg :target: https://pypi.python.org/pypi/nibabel/ :alt: PyPI - Python Version - .. image:: https://img.shields.io/pypi/implementation/nibabel.svg - :target: https://pypi.python.org/pypi/nibabel/ - :alt: PyPI - Implementation .. 
image:: https://img.shields.io/pypi/dm/nibabel.svg :target: https://pypistats.org/packages/nibabel/ :alt: PyPI - Downloads From 9ff1b7f6fb7b53331d14cfc6b51276963868e5b0 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:04:49 +0200 Subject: [PATCH 254/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index bea66dd1d7..5e11685eac 100644 --- a/README.rst +++ b/README.rst @@ -21,7 +21,7 @@ and `API reference`_. .. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm .. _CIFTI-2: https://www.nitrc.org/projects/cifti/ .. _DICOM: http://medical.nema.org/ -.. _documentation site: http://nipy.org/NiBabel +.. _documentation site: http://nipy.org/nibabel .. _ECAT: http://xmedcon.sourceforge.net/Docs/Ecat .. _Freesurfer: https://surfer.nmr.mgh.harvard.edu .. _GIFTI: https://www.nitrc.org/projects/gifti From e21a9235be30fc078a6276165f16e5fe942da820 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:05:09 +0200 Subject: [PATCH 255/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 5e11685eac..a0c90b4eb6 100644 --- a/README.rst +++ b/README.rst @@ -104,10 +104,11 @@ To install the latest development version, run:: pip install git+https://github.com/nipy/nibabel -For more information on previous releases, see the `release archive`_. +For more information on previous releases, see the `release archive`_ or `development changelog`_. .. _current release: https://pypi.python.org/pypi/NiBabel .. _release archive: https://github.com/nipy/NiBabel/releases +.. _development changelog: https://nipy.org/nibabel/changelog.html Mailing List ============ From 9e2780a6a02a58f4fbf39c07b8482909dbc0037e Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:05:38 +0200 Subject: [PATCH 256/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.rst b/README.rst index a0c90b4eb6..e19a6cab8c 100644 --- a/README.rst +++ b/README.rst @@ -104,6 +104,10 @@ To install the latest development version, run:: pip install git+https://github.com/nipy/nibabel +When working on NiBabel itself, it may be useful to install in "editable" mode:: + + git clone https://github.com/nipy/nibabel.git + pip install -e ./nibabel For more information on previous releases, see the `release archive`_ or `development changelog`_. .. _current release: https://pypi.python.org/pypi/NiBabel From 59ea1a8293e828ad1097e2421660910642338d1d Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sun, 12 Feb 2023 20:05:46 +0200 Subject: [PATCH 257/702] Update README.rst Co-authored-by: Chris Markiewicz --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index e19a6cab8c..b071191593 100644 --- a/README.rst +++ b/README.rst @@ -134,7 +134,7 @@ please see the COPYING_ file. Citation ======== -Recent NiBabel releases have a Zenodo_ `Digital Object Identifier`_ (DOI) badge at +NiBabel releases have a Zenodo_ `Digital Object Identifier`_ (DOI) badge at the top of the release notes. Click on the badge for more information. .. 
_Digital Object Identifier: https://en.wikipedia.org/wiki/Digital_object_identifier

From 19f9a44262c3e16542c09c08dcf4eff8ac5a3ea1 Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Sun, 12 Feb 2023 20:12:01 +0200
Subject: [PATCH 258/702] Added missing blank line

---
 README.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.rst b/README.rst
index b071191593..567941daf1 100644
--- a/README.rst
+++ b/README.rst
@@ -108,6 +108,7 @@ When working on NiBabel itself, it may be useful to install in "editable" mode::
 
     git clone https://github.com/nipy/nibabel.git
     pip install -e ./nibabel
+
 For more information on previous releases, see the `release archive`_ or `development changelog`_.
 
 .. _current release: https://pypi.python.org/pypi/NiBabel

From d95ef9c706dc24132dd822f4683bfc5b0a6575bd Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Mon, 13 Feb 2023 09:14:33 +0200
Subject: [PATCH 259/702] Removed "Status" and "Packaging" sections

Tried merging with other sections and using line breaks for some
inner-section separation.

---
 README.rst | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/README.rst b/README.rst
index b071191593..cabc3c285f 100644
--- a/README.rst
+++ b/README.rst
@@ -33,12 +33,18 @@ and `API reference`_.
 .. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/
 .. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/
 
+.. role:: raw-html(raw)
+    :format: html
+
 .. list-table::
    :widths: 20 80
    :header-rows: 0
 
   * - Code
     -
+      .. image:: https://img.shields.io/pypi/pyversions/nibabel.svg
+         :target: https://pypi.python.org/pypi/nibabel/
+         :alt: PyPI - Python Version
       .. image:: https://img.shields.io/badge/code%20style-blue-blue.svg
          :target: https://blue.readthedocs.io/en/latest/
          :alt: code style: blue
@@ -48,30 +54,27 @@ and `API reference`_.
       .. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
          :target: https://github.com/pre-commit/pre-commit
          :alt: pre-commit
 
-  * - Status
-    -
+      :raw-html:`<br />`
+
+      .. image:: https://github.com/nipy/NiBabel/actions/workflows/stable.yml/badge.svg
+         :target: https://github.com/nipy/NiBabel/actions/workflows/stable.yml
+         :alt: stable tests
+      .. image:: https://codecov.io/gh/nipy/NiBabel/branch/master/graph/badge.svg
+         :target: https://codecov.io/gh/nipy/NiBabel
+         :alt: codecov badge
 
-  * - Packaging
+  * - Distribution
     -
       .. image:: https://img.shields.io/pypi/v/nibabel.svg
          :target: https://pypi.python.org/pypi/nibabel/
          :alt: PyPI version
       .. image:: https://img.shields.io/pypi/dm/nibabel.svg
-         :target: https://pypistats.org/packages/nibabel
+         :target: https://pypistats.org/packages/nibabel/
          :alt: PyPI - Downloads
 
-  * - Distribution
-    -
+      :raw-html:`<br />`
+
       .. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29
          :target: https://repology.org/project/python:nibabel/versions
          :alt: Arch (AUR)
       .. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable
          :target: https://repology.org/project/nibabel/versions
          :alt: Debian Unstable package
       .. image:: https://repology.org/badge/version-for-repo/gentoo_ovl_science/nibabel.svg?header=Gentoo%20%28%3A%3Ascience%29
          :target: https://repology.org/project/nibabel/versions
          :alt: Gentoo (::science)
       .. image:: https://repology.org/badge/version-for-repo/nix_unstable/python:nibabel.svg?header=nixpkgs%20unstable
          :target: https://repology.org/project/python:nibabel/versions
          :alt: nixpkgs unstable
+
   * - License & DOI
     -
       .. image:: https://img.shields.io/pypi/l/nibabel.svg
          :target: https://github.com/nipy/nibabel/blob/master/COPYING
          :alt: License
       .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.591597.svg
          :target: https://doi.org/10.5281/zenodo.591597
          :alt: Zenodo DOI

From bdf5667d8276d630aa581bd28d318804f481ab86 Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Mon, 13 Feb 2023 09:34:20 +0200
Subject: [PATCH 260/702] Revised badge table sectioning

Line breaks did not work as expected. Split "Code" section to "Code" and
"Tests", and "Distribution" section to "PyPI" and "Linux".

---
 README.rst | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/README.rst b/README.rst
index cabc3c285f..f011a3aa55 100644
--- a/README.rst
+++ b/README.rst
@@ -33,9 +33,6 @@ and `API reference`_.
 .. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/
 .. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/
 
-.. role:: raw-html(raw)
-    :format: html
-
 .. list-table::
    :widths: 20 80
    :header-rows: 0
@@ -55,8 +52,8 @@ and `API reference`_.
       .. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
          :target: https://github.com/pre-commit/pre-commit
          :alt: pre-commit
 
-      :raw-html:`<br />`
-
+  * - Tests
+    -
       .. image:: https://github.com/nipy/NiBabel/actions/workflows/stable.yml/badge.svg
          :target: https://github.com/nipy/NiBabel/actions/workflows/stable.yml
          :alt: stable tests
       .. image:: https://codecov.io/gh/nipy/NiBabel/branch/master/graph/badge.svg
          :target: https://codecov.io/gh/nipy/NiBabel
          :alt: codecov badge
 
-  * - Distribution
+  * - PyPI
     -
       .. image:: https://img.shields.io/pypi/v/nibabel.svg
          :target: https://pypi.python.org/pypi/nibabel/
          :alt: PyPI version
       .. image:: https://img.shields.io/pypi/dm/nibabel.svg
          :target: https://pypistats.org/packages/nibabel/
          :alt: PyPI - Downloads
 
-      :raw-html:`<br />`
-
-      .. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29
-         :target: https://repology.org/project/python:nibabel/versions
-         :alt: Arch (AUR)
+  * - Linux
+    -
       .. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable
          :target: https://repology.org/project/nibabel/versions
          :alt: Debian Unstable package
+      .. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29
+         :target: https://repology.org/project/python:nibabel/versions
+         :alt: Arch (AUR)
       .. image:: https://repology.org/badge/version-for-repo/gentoo_ovl_science/nibabel.svg?header=Gentoo%20%28%3A%3Ascience%29
          :target: https://repology.org/project/nibabel/versions
          :alt: Gentoo (::science)
       .. image:: https://repology.org/badge/version-for-repo/nix_unstable/python:nibabel.svg?header=nixpkgs%20unstable
          :target: https://repology.org/project/python:nibabel/versions
          :alt: nixpkgs unstable

From b8406994b0e5fdcac38e89d1c2e00d55f73d1aaa Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Mon, 13 Feb 2023 16:46:02 +0200
Subject: [PATCH 261/702] Update README.rst

Co-authored-by: Chris Markiewicz
---
 README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index f011a3aa55..738f34036e 100644
--- a/README.rst
+++ b/README.rst
@@ -67,7 +67,7 @@ and `API reference`_.
          :target: https://pypi.python.org/pypi/nibabel/
          :alt: PyPI version
       .. image:: https://img.shields.io/pypi/dm/nibabel.svg
-         :target: https://pypistats.org/packages/nibabel/
+         :target: https://pypistats.org/packages/nibabel
          :alt: PyPI - Downloads
 
   * - Linux

From 95c41b32d0733a56658107e768011006aac581b6 Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Mon, 13 Feb 2023 16:46:20 +0200
Subject: [PATCH 262/702] Update README.rst

Co-authored-by: Chris Markiewicz
---
 README.rst | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index 738f34036e..23d1f550a5 100644
--- a/README.rst
+++ b/README.rst
@@ -70,8 +70,11 @@ and `API reference`_.
          :target: https://pypistats.org/packages/nibabel
          :alt: PyPI - Downloads
 
-  * - Linux
+  * - Packages
     -
+      .. image:: https://img.shields.io/conda/vn/conda-forge/nibabel
+      :target: https://anaconda.org/conda-forge/nibabel
+      :alt: Conda package
       .. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable
          :target: https://repology.org/project/nibabel/versions
          :alt: Debian Unstable package

From 08187d5f2f0a0293bee87eb84b8bff13635910c4 Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Mon, 13 Feb 2023 16:52:10 +0200
Subject: [PATCH 263/702] Added missing space

---
 README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index 23d1f550a5..65c9ad383c 100644
--- a/README.rst
+++ b/README.rst
@@ -72,7 +72,7 @@ and `API reference`_.
 
   * - Packages
     -
-      .. image:: https://img.shields.io/conda/vn/conda-forge/nibabel
+      .. image:: https://img.shields.io/conda/vn/conda-forge/nibabel
       :target: https://anaconda.org/conda-forge/nibabel
       :alt: Conda package

From 625c75bb8efa640d87f4b9c6b8af168c9bc36462 Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Mon, 13 Feb 2023 18:14:46 +0200
Subject: [PATCH 264/702] Copied README content to long_description

---
 README.rst      |   2 +-
 nibabel/info.py | 183 ++++++++++++++++++++++++++++++++----------------
 2 files changed, 122 insertions(+), 63 deletions(-)

diff --git a/README.rst b/README.rst
index 65c9ad383c..641480b8aa 100644
--- a/README.rst
+++ b/README.rst
@@ -1,7 +1,7 @@
 .. -*- rest -*-
 .. vim:syntax=rst
 
-..
Following contents should be copied from LONG_DESCRIPTION in NiBabel/info.py +.. Following contents should be copied from LONG_DESCRIPTION in nibabel/info.py .. image:: doc/pics/logo.png :target: https://nipy.org/nibabel diff --git a/nibabel/info.py b/nibabel/info.py index 96031ac954..97be482e89 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -12,86 +12,145 @@ # We also include this text in the docs by ``..include::`` in # ``docs/source/index.rst``. long_description = """ -======= -NiBabel -======= +.. image:: doc/pics/logo.png + :target: https://nipy.org/nibabel + :alt: NiBabel logo -Read / write access to some common neuroimaging file formats +Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), +GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. +In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, +and provides some limited support for DICOM_. -This package provides read +/- write access to some common medical and -neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), -GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, MGH_ and -ECAT_ as well as Philips PAR/REC. We can read and write FreeSurfer_ geometry, -annotation and morphometry files. There is some very limited support for -DICOM_. NiBabel is the successor of PyNIfTI_. +NiBabel's API gives full or selective access to header information (metadata), and image +data is made available via NumPy arrays. For more information, see NiBabel's `documentation site`_ +and `API reference`_. -.. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm +.. _API reference: https://nipy.org/nibabel/api.html .. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes -.. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ -.. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ +.. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm .. _CIFTI-2: https://www.nitrc.org/projects/cifti/ +.. _DICOM: http://medical.nema.org/ +.. _documentation site: http://nipy.org/nibabel +.. _ECAT: http://xmedcon.sourceforge.net/Docs/Ecat +.. _Freesurfer: https://surfer.nmr.mgh.harvard.edu +.. _GIFTI: https://www.nitrc.org/projects/gifti +.. _MGH: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat .. _MINC1: https://en.wikibooks.org/wiki/MINC/Reference/MINC1_File_Format_Reference .. _MINC2: https://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference -.. _PyNIfTI: http://niftilib.sourceforge.net/pynifti/ -.. _GIFTI: https://www.nitrc.org/projects/gifti -.. _MGH: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat -.. _ECAT: http://xmedcon.sourceforge.net/Docs/Ecat -.. _Freesurfer: https://surfer.nmr.mgh.harvard.edu -.. _DICOM: http://medical.nema.org/ - -The various image format classes give full or selective access to header -(meta) information and access to the image data is made available via NumPy -arrays. - -Website -======= - -Current documentation on nibabel can always be found at the `NIPY nibabel -website `_. +.. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ +.. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ -Mailing Lists -============= +.. list-table:: + :widths: 20 80 + :header-rows: 0 + + * - Code + - + .. image:: https://img.shields.io/pypi/pyversions/nibabel.svg + :target: https://pypi.python.org/pypi/nibabel/ + :alt: PyPI - Python Version + .. 
image:: https://img.shields.io/badge/code%20style-blue-blue.svg + :target: https://blue.readthedocs.io/en/latest/ + :alt: code style: blue + .. image:: https://img.shields.io/badge/imports-isort-1674b1 + :target: https://pycqa.github.io/isort/ + :alt: imports: isort + .. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white + :target: https://github.com/pre-commit/pre-commit + :alt: pre-commit + + * - Tests + - + .. image:: https://github.com/nipy/NiBabel/actions/workflows/stable.yml/badge.svg + :target: https://github.com/nipy/NiBabel/actions/workflows/stable.yml + :alt: stable tests + .. image:: https://codecov.io/gh/nipy/NiBabel/branch/master/graph/badge.svg + :target: https://codecov.io/gh/nipy/NiBabel + :alt: codecov badge + + * - PyPI + - + .. image:: https://img.shields.io/pypi/v/nibabel.svg + :target: https://pypi.python.org/pypi/nibabel/ + :alt: PyPI version + .. image:: https://img.shields.io/pypi/dm/nibabel.svg + :target: https://pypistats.org/packages/nibabel + :alt: PyPI - Downloads + + * - Packages + - + .. image:: https://img.shields.io/conda/vn/conda-forge/nibabel + :target: https://anaconda.org/conda-forge/nibabel + :alt: Conda package + .. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable + :target: https://repology.org/project/nibabel/versions + :alt: Debian Unstable package + .. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29 + :target: https://repology.org/project/python:nibabel/versions + :alt: Arch (AUR) + .. image:: https://repology.org/badge/version-for-repo/gentoo_ovl_science/nibabel.svg?header=Gentoo%20%28%3A%3Ascience%29 + :target: https://repology.org/project/nibabel/versions + :alt: Gentoo (::science) + .. image:: https://repology.org/badge/version-for-repo/nix_unstable/python:nibabel.svg?header=nixpkgs%20unstable + :target: https://repology.org/project/python:nibabel/versions + :alt: nixpkgs unstable + + * - License & DOI + - + .. image:: https://img.shields.io/pypi/l/nibabel.svg + :target: https://github.com/nipy/nibabel/blob/master/COPYING + :alt: License + .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.591597.svg + :target: https://doi.org/10.5281/zenodo.591597 + :alt: Zenodo DOI + +Installation +============ + +To install NiBabel's `current release`_ with ``pip``, run:: + + pip install nibabel + +To install the latest development version, run:: + + pip install git+https://github.com/nipy/nibabel + +When working on NiBabel itself, it may be useful to install in "editable" mode:: + + git clone https://github.com/nipy/nibabel.git + pip install -e ./nibabel + +For more information on previous releases, see the `release archive`_ or `development changelog`_. + +.. _current release: https://pypi.python.org/pypi/NiBabel +.. _release archive: https://github.com/nipy/NiBabel/releases +.. _development changelog: https://nipy.org/nibabel/changelog.html + +Mailing List +============ Please send any questions or suggestions to the `neuroimaging mailing list `_. -Code -==== - -Install nibabel with:: - - pip install nibabel - -You may also be interested in: - -* the `nibabel code repository`_ on Github; -* documentation_ for all releases and current development tree; -* download the `current release`_ from pypi; -* download `current development version`_ as a zip file; -* downloads of all `available releases`_. - -.. _nibabel code repository: https://github.com/nipy/nibabel -.. 
_Documentation: http://nipy.org/nibabel -.. _current release: https://pypi.python.org/pypi/nibabel -.. _current development version: https://github.com/nipy/nibabel/archive/master.zip -.. _available releases: https://github.com/nipy/nibabel/releases - License ======= -Nibabel is licensed under the terms of the MIT license. Some code included -with nibabel is licensed under the BSD license. Please see the COPYING file -in the nibabel distribution. +NiBabel is licensed under the terms of the `MIT license`_. Some code included +with NiBabel is licensed under the `BSD license`_. For more information, +please see the COPYING_ file. -Citing nibabel -============== +.. _BSD license: https://opensource.org/licenses/BSD-3-Clause +.. _COPYING: https://github.com/nipy/nibabel/blob/master/COPYING +.. _MIT license: https://github.com/nipy/nibabel/blob/master/COPYING#nibabel -Please see the `available releases`_ for the release of nibabel that you are -using. Recent releases have a Zenodo_ `Digital Object Identifier`_ badge at -the top of the release notes. Click on the badge for more information. +Citation +======== + +NiBabel releases have a Zenodo_ `Digital Object Identifier`_ (DOI) badge at +the top of the release notes. Click on the badge for more information. -.. _zenodo: https://zenodo.org .. _Digital Object Identifier: https://en.wikipedia.org/wiki/Digital_object_identifier -""" +.. _zenodo: https://zenodo.org +""" # noqa: E501 From 358e575b4ef9a4422fb74d9cbb70d760920c9658 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 14 Feb 2023 07:52:42 -0500 Subject: [PATCH 265/702] DOC: Move logo and badges out of long description into README * Create top-level header in index.rst * Remove duplicate definition of MIT License URL --- README.rst | 65 ++++++++++++++++++------------------- doc/source/index.rst | 4 +++ nibabel/info.py | 76 +++----------------------------------------- 3 files changed, 41 insertions(+), 104 deletions(-) diff --git a/README.rst b/README.rst index 641480b8aa..6916c494b3 100644 --- a/README.rst +++ b/README.rst @@ -1,38 +1,10 @@ .. -*- rest -*- .. vim:syntax=rst -.. Following contents should be copied from LONG_DESCRIPTION in nibabel/info.py - .. image:: doc/pics/logo.png :target: https://nipy.org/nibabel :alt: NiBabel logo -Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), -GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. -In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, -and provides some limited support for DICOM_. - -NiBabel's API gives full or selective access to header information (metadata), and image -data is made available via NumPy arrays. For more information, see NiBabel's `documentation site`_ -and `API reference`_. - -.. _API reference: https://nipy.org/nibabel/api.html -.. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes -.. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm -.. _CIFTI-2: https://www.nitrc.org/projects/cifti/ -.. _DICOM: http://medical.nema.org/ -.. _documentation site: http://nipy.org/nibabel -.. _ECAT: http://xmedcon.sourceforge.net/Docs/Ecat -.. _Freesurfer: https://surfer.nmr.mgh.harvard.edu -.. _GIFTI: https://www.nitrc.org/projects/gifti -.. _MGH: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat -.. _MINC1: - https://en.wikibooks.org/wiki/MINC/Reference/MINC1_File_Format_Reference -.. 
_MINC2: - https://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference -.. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ -.. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ - .. list-table:: :widths: 20 80 :header-rows: 0 @@ -97,6 +69,35 @@ and `API reference`_. :target: https://doi.org/10.5281/zenodo.591597 :alt: Zenodo DOI +.. Following contents should be copied from LONG_DESCRIPTION in nibabel/info.py + + +Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), +GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. +In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, +and provides some limited support for DICOM_. + +NiBabel's API gives full or selective access to header information (metadata), and image +data is made available via NumPy arrays. For more information, see NiBabel's `documentation site`_ +and `API reference`_. + +.. _API reference: https://nipy.org/nibabel/api.html +.. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes +.. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm +.. _CIFTI-2: https://www.nitrc.org/projects/cifti/ +.. _DICOM: http://medical.nema.org/ +.. _documentation site: http://nipy.org/nibabel +.. _ECAT: http://xmedcon.sourceforge.net/Docs/Ecat +.. _Freesurfer: https://surfer.nmr.mgh.harvard.edu +.. _GIFTI: https://www.nitrc.org/projects/gifti +.. _MGH: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat +.. _MINC1: + https://en.wikibooks.org/wiki/MINC/Reference/MINC1_File_Format_Reference +.. _MINC2: + https://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference +.. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ +.. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ + Installation ============ @@ -128,13 +129,13 @@ Please send any questions or suggestions to the `neuroimaging mailing list License ======= -NiBabel is licensed under the terms of the `MIT license`_. Some code included -with NiBabel is licensed under the `BSD license`_. For more information, -please see the COPYING_ file. +NiBabel is licensed under the terms of the `MIT license +`__. +Some code included with NiBabel is licensed under the `BSD license`_. +For more information, please see the COPYING_ file. .. _BSD license: https://opensource.org/licenses/BSD-3-Clause .. _COPYING: https://github.com/nipy/nibabel/blob/master/COPYING -.. _MIT license: https://github.com/nipy/nibabel/blob/master/COPYING#nibabel Citation ======== diff --git a/doc/source/index.rst b/doc/source/index.rst index 8eb8a9c7d5..701de01362 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -7,6 +7,10 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### +======= +NiBabel +======= + .. include:: _long_description.inc Documentation diff --git a/nibabel/info.py b/nibabel/info.py index 97be482e89..c84153f220 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -12,10 +12,6 @@ # We also include this text in the docs by ``..include::`` in # ``docs/source/index.rst``. long_description = """ -.. image:: doc/pics/logo.png - :target: https://nipy.org/nibabel - :alt: NiBabel logo - Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. 
In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, @@ -42,70 +38,6 @@ .. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ .. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ -.. list-table:: - :widths: 20 80 - :header-rows: 0 - - * - Code - - - .. image:: https://img.shields.io/pypi/pyversions/nibabel.svg - :target: https://pypi.python.org/pypi/nibabel/ - :alt: PyPI - Python Version - .. image:: https://img.shields.io/badge/code%20style-blue-blue.svg - :target: https://blue.readthedocs.io/en/latest/ - :alt: code style: blue - .. image:: https://img.shields.io/badge/imports-isort-1674b1 - :target: https://pycqa.github.io/isort/ - :alt: imports: isort - .. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white - :target: https://github.com/pre-commit/pre-commit - :alt: pre-commit - - * - Tests - - - .. image:: https://github.com/nipy/NiBabel/actions/workflows/stable.yml/badge.svg - :target: https://github.com/nipy/NiBabel/actions/workflows/stable.yml - :alt: stable tests - .. image:: https://codecov.io/gh/nipy/NiBabel/branch/master/graph/badge.svg - :target: https://codecov.io/gh/nipy/NiBabel - :alt: codecov badge - - * - PyPI - - - .. image:: https://img.shields.io/pypi/v/nibabel.svg - :target: https://pypi.python.org/pypi/nibabel/ - :alt: PyPI version - .. image:: https://img.shields.io/pypi/dm/nibabel.svg - :target: https://pypistats.org/packages/nibabel - :alt: PyPI - Downloads - - * - Packages - - - .. image:: https://img.shields.io/conda/vn/conda-forge/nibabel - :target: https://anaconda.org/conda-forge/nibabel - :alt: Conda package - .. image:: https://repology.org/badge/version-for-repo/debian_unstable/nibabel.svg?header=Debian%20Unstable - :target: https://repology.org/project/nibabel/versions - :alt: Debian Unstable package - .. image:: https://repology.org/badge/version-for-repo/aur/python:nibabel.svg?header=Arch%20%28%41%55%52%29 - :target: https://repology.org/project/python:nibabel/versions - :alt: Arch (AUR) - .. image:: https://repology.org/badge/version-for-repo/gentoo_ovl_science/nibabel.svg?header=Gentoo%20%28%3A%3Ascience%29 - :target: https://repology.org/project/nibabel/versions - :alt: Gentoo (::science) - .. image:: https://repology.org/badge/version-for-repo/nix_unstable/python:nibabel.svg?header=nixpkgs%20unstable - :target: https://repology.org/project/python:nibabel/versions - :alt: nixpkgs unstable - - * - License & DOI - - - .. image:: https://img.shields.io/pypi/l/nibabel.svg - :target: https://github.com/nipy/nibabel/blob/master/COPYING - :alt: License - .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.591597.svg - :target: https://doi.org/10.5281/zenodo.591597 - :alt: Zenodo DOI - Installation ============ @@ -137,13 +69,13 @@ License ======= -NiBabel is licensed under the terms of the `MIT license`_. Some code included -with NiBabel is licensed under the `BSD license`_. For more information, -please see the COPYING_ file. +NiBabel is licensed under the terms of the `MIT license +`__. +Some code included with NiBabel is licensed under the `BSD license`_. +For more information, please see the COPYING_ file. .. _BSD license: https://opensource.org/licenses/BSD-3-Clause .. _COPYING: https://github.com/nipy/nibabel/blob/master/COPYING -.. 
_MIT license: https://github.com/nipy/nibabel/blob/master/COPYING#nibabel Citation ======== From 6d1fd303a8f24fb13bcbd233bcf68244a0158b60 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 14 Feb 2023 08:12:11 -0500 Subject: [PATCH 266/702] DOC: Update nibabel.info docstring, add line breaks for nicer pydoc experience --- README.rst | 18 ++++++++++-------- nibabel/info.py | 24 +++++++++++++----------- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/README.rst b/README.rst index 6916c494b3..45856f6795 100644 --- a/README.rst +++ b/README.rst @@ -72,14 +72,15 @@ .. Following contents should be copied from LONG_DESCRIPTION in nibabel/info.py -Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), -GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. -In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, -and provides some limited support for DICOM_. +Read and write access to common neuroimaging file formats, including: +ANALYZE_ (plain, SPM99, SPM2 and later), GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, +MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. +In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and +morphometry files, and provides some limited support for DICOM_. -NiBabel's API gives full or selective access to header information (metadata), and image -data is made available via NumPy arrays. For more information, see NiBabel's `documentation site`_ -and `API reference`_. +NiBabel's API gives full or selective access to header information (metadata), +and image data is made available via NumPy arrays. For more information, see +NiBabel's `documentation site`_ and `API reference`_. .. _API reference: https://nipy.org/nibabel/api.html .. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes @@ -114,7 +115,8 @@ When working on NiBabel itself, it may be useful to install in "editable" mode:: git clone https://github.com/nipy/nibabel.git pip install -e ./nibabel -For more information on previous releases, see the `release archive`_ or `development changelog`_. +For more information on previous releases, see the `release archive`_ or +`development changelog`_. .. _current release: https://pypi.python.org/pypi/NiBabel .. _release archive: https://github.com/nipy/NiBabel/releases diff --git a/nibabel/info.py b/nibabel/info.py index c84153f220..063978444c 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -1,7 +1,7 @@ -"""Define distribution parameters for nibabel, including package version +"""Define static nibabel metadata for nibabel -The long description parameter is used to fill settings in setup.py, the -nibabel top-level docstring, and in building the docs. +The long description parameter is used in the nibabel top-level docstring, +and in building the docs. We exec this file in several places, so it cannot import nibabel or use relative imports. """ @@ -12,14 +12,15 @@ # We also include this text in the docs by ``..include::`` in # ``docs/source/index.rst``. long_description = """ -Read and write access to common neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), -GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. -In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and morphometry files, -and provides some limited support for DICOM_. 
+Read and write access to common neuroimaging file formats, including: +ANALYZE_ (plain, SPM99, SPM2 and later), GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, +MINC1_, MINC2_, `AFNI BRIK/HEAD`_, ECAT_ and Philips PAR/REC. +In addition, NiBabel also supports FreeSurfer_'s MGH_, geometry, annotation and +morphometry files, and provides some limited support for DICOM_. -NiBabel's API gives full or selective access to header information (metadata), and image -data is made available via NumPy arrays. For more information, see NiBabel's `documentation site`_ -and `API reference`_. +NiBabel's API gives full or selective access to header information (metadata), +and image data is made available via NumPy arrays. For more information, see +NiBabel's `documentation site`_ and `API reference`_. .. _API reference: https://nipy.org/nibabel/api.html .. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes @@ -54,7 +55,8 @@ git clone https://github.com/nipy/nibabel.git pip install -e ./nibabel -For more information on previous releases, see the `release archive`_ or `development changelog`_. +For more information on previous releases, see the `release archive`_ or +`development changelog`_. .. _current release: https://pypi.python.org/pypi/NiBabel .. _release archive: https://github.com/nipy/NiBabel/releases From 0c2dffdbf3e51db105e9a94401963a4ce76b0fb7 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sat, 18 Feb 2023 10:54:27 +0200 Subject: [PATCH 267/702] DOC: Homogenized module-level docstring formatting --- nibabel/affines.py | 5 ++--- nibabel/brikhead.py | 3 +-- nibabel/data.py | 4 +--- nibabel/deprecated.py | 3 +-- nibabel/deprecator.py | 3 +-- nibabel/dft.py | 3 +-- nibabel/environment.py | 4 +--- nibabel/filebasedimages.py | 2 +- nibabel/fileslice.py | 3 +-- nibabel/fileutils.py | 3 +-- nibabel/imagestats.py | 4 +--- nibabel/mriutils.py | 4 +--- nibabel/onetime.py | 3 +-- nibabel/openers.py | 3 +-- nibabel/parrec.py | 2 +- nibabel/processing.py | 12 +++++++----- nibabel/quaternions.py | 2 +- nibabel/tmpdirs.py | 3 +-- nibabel/tripwire.py | 3 +-- nibabel/viewers.py | 2 +- nibabel/xmlutils.py | 4 +--- 21 files changed, 28 insertions(+), 47 deletions(-) diff --git a/nibabel/affines.py b/nibabel/affines.py index 59b52e768e..d6c101ddd5 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -1,7 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""Utility routines for working with points and affine transforms -""" +"""Utility routines for working with points and affine transforms""" from functools import reduce import numpy as np @@ -313,7 +312,7 @@ def voxel_sizes(affine): def obliquity(affine): r""" - Estimate the *obliquity* an affine's axes represent. + Estimate the *obliquity* an affine's axes represent The term *obliquity* is defined here as the rotation of those axes with respect to the cardinal axes. diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index f375b541dc..ee5f766722 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Class for reading AFNI BRIK/HEAD datasets +"""Class for reading AFNI BRIK/HEAD datasets See https://afni.nimh.nih.gov/pub/dist/doc/program_help/README.attributes.html for information on what is required to have a valid BRIK/HEAD dataset. 
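A minimal sketch of the API described in the long_description above, assuming
a local NIfTI file with the hypothetical name ``example.nii.gz``::

    import nibabel as nib

    img = nib.load('example.nii.gz')  # works for any supported format
    print(img.shape, img.affine)      # header-derived metadata
    data = img.get_fdata()            # image data as a floating-point NumPy array
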
diff --git a/nibabel/data.py b/nibabel/data.py index 42826d2f67..7e2fe2af70 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Utilities to find files from NIPY data packages -""" +"""Utilities to find files from NIPY data packages""" import configparser import glob import os diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index c353071954..092370106e 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -1,5 +1,4 @@ -"""Module to help with deprecating objects and classes -""" +"""Module to help with deprecating objects and classes""" from __future__ import annotations import typing as ty diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 3ef6b45066..779fdb462d 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -1,5 +1,4 @@ -"""Class for recording and reporting deprecations -""" +"""Class for recording and reporting deprecations""" from __future__ import annotations import functools diff --git a/nibabel/dft.py b/nibabel/dft.py index c805128951..7a49d49f52 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -7,8 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Copyright (C) 2011 Christian Haselgrove -"""DICOM filesystem tools -""" +"""DICOM filesystem tools""" import contextlib diff --git a/nibabel/environment.py b/nibabel/environment.py index 6f331eed5a..a828ccb865 100644 --- a/nibabel/environment.py +++ b/nibabel/environment.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Settings from the system environment relevant to NIPY -""" +"""Settings from the system environment relevant to NIPY""" import os from os.path import join as pjoin diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 556d8b75e5..6e4ea86135 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Common interface for any image format--volume or surface, binary or xml.""" +"""Common interface for any image format--volume or surface, binary or xml""" from __future__ import annotations import io diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index 87cac05a4a..816f1cdaf6 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -1,5 +1,4 @@ -"""Utilities for getting array slices out of file-like objects -""" +"""Utilities for getting array slices out of file-like objects""" import operator from functools import reduce diff --git a/nibabel/fileutils.py b/nibabel/fileutils.py index da44fe51a9..1defbc62f7 100644 --- a/nibabel/fileutils.py +++ b/nibabel/fileutils.py @@ -6,8 +6,7 @@ # copyright and license terms. # # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Utilities for reading and writing to binary file formats -""" +"""Utilities for reading and writing to binary file formats""" def read_zt_byte_strings(fobj, n_strings=1, bufsize=1024): diff --git a/nibabel/imagestats.py b/nibabel/imagestats.py index 6f1b68178b..36fbddee0e 100644 --- a/nibabel/imagestats.py +++ b/nibabel/imagestats.py @@ -6,9 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Functions for computing image statistics -""" +"""Functions for computing image statistics""" import numpy as np diff --git a/nibabel/mriutils.py b/nibabel/mriutils.py index d993d26a21..09067cc1e9 100644 --- a/nibabel/mriutils.py +++ b/nibabel/mriutils.py @@ -6,9 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Utilities for calculations related to MRI -""" +"""Utilities for calculations related to MRI""" __all__ = ['calculate_dwell_time'] diff --git a/nibabel/onetime.py b/nibabel/onetime.py index 7c723d4c83..e365e81f74 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -1,5 +1,4 @@ -""" -Descriptor support for NIPY. +"""Descriptor support for NIPY Utilities to support special Python descriptors [1,2], in particular the use of a useful pattern for properties we call 'one time properties'. These are diff --git a/nibabel/openers.py b/nibabel/openers.py index d75839fe1a..d11c8834a4 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Context manager openers for various fileobject types -""" +"""Context manager openers for various fileobject types""" import gzip import warnings diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 7c594dcb45..086f2a79d2 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -8,7 +8,7 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Disable line length checking for PAR fragments in module docstring # flake8: noqa E501 -"""Read images in PAR/REC format. +"""Read images in PAR/REC format This is yet another MRI image format generated by Philips scanners. It is an ASCII header (PAR) plus a binary blob (REC). diff --git a/nibabel/processing.py b/nibabel/processing.py index c7bd3888de..6027575d47 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -6,13 +6,15 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Image processing functions for: +"""Image processing functions -* smoothing -* resampling -* converting sd to and from FWHM +Image processing functions for: -Smoothing and resampling routines need scipy + * smoothing + * resampling + * converting SD to and from FWHM + +Smoothing and resampling routines need scipy. """ import numpy as np diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index 04c570c84b..9732bc5c63 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -7,7 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """ -Functions to operate on, or return, quaternions. +Functions to operate on, or return, quaternions The module also includes functions for the closely related angle, axis pair as a specification for rotation. diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 3074fca6f2..7fe47e6510 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -6,8 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Contexts for *with* statement providing temporary directories -""" +"""Contexts for *with* statement providing temporary directories""" import os import tempfile from contextlib import contextmanager diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index d0c3d4c50c..fa45e73382 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -1,5 +1,4 @@ -"""Class to raise error for missing modules or other misfortunes -""" +"""Class to raise error for missing modules or other misfortunes""" from typing import Any diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 9dad3dd17f..5138610fe4 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -14,7 +14,7 @@ class OrthoSlicer3D: - """Orthogonal-plane slice viewer. + """Orthogonal-plane slice viewer OrthoSlicer3d expects 3- or 4-dimensional array data. It treats 4D data as a sequence of 3D spatial volumes, where a slice over the final diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 8e0b18fb6e..9b47d81381 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -6,9 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Thin layer around xml.etree.ElementTree, to abstract nibabel xml support. -""" +"""Thin layer around xml.etree.ElementTree, to abstract nibabel xml support""" from io import BytesIO from xml.etree.ElementTree import Element, SubElement, tostring # noqa From e7dc5fee1d847504c6c764b1030cc91af9953f48 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sat, 18 Feb 2023 11:04:58 +0200 Subject: [PATCH 268/702] DOC: Removed spacing between module docstrings and imports --- nibabel/arraywriters.py | 1 - nibabel/ecat.py | 1 - nibabel/environment.py | 1 - nibabel/eulerangles.py | 1 - nibabel/fileholders.py | 1 - nibabel/filename_parser.py | 1 - nibabel/fileslice.py | 1 - nibabel/imageclasses.py | 1 - nibabel/imageglobals.py | 1 - nibabel/imagestats.py | 1 - nibabel/loadsave.py | 1 - nibabel/nifti2.py | 1 - nibabel/openers.py | 1 - nibabel/orientations.py | 2 -- nibabel/parrec.py | 1 - nibabel/processing.py | 1 - nibabel/quaternions.py | 1 - nibabel/rstutils.py | 1 - nibabel/spaces.py | 1 - nibabel/viewers.py | 1 - nibabel/xmlutils.py | 1 - 21 files changed, 22 deletions(-) diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 5a0b04925e..bdd2d548f8 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -28,7 +28,6 @@ def __init__(self, array, out_dtype=None) something else to make sense of conversions between float and int, or between larger ints and smaller. """ - import numpy as np from .casting import ( diff --git a/nibabel/ecat.py b/nibabel/ecat.py index f1a40dd27c..23a58f752e 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -42,7 +42,6 @@ GPL and some of the header files are adapted from CTI files (called CTI code below). It's not clear what the licenses are for these files. 
""" - import warnings from numbers import Integral diff --git a/nibabel/environment.py b/nibabel/environment.py index a828ccb865..09aaa6320f 100644 --- a/nibabel/environment.py +++ b/nibabel/environment.py @@ -1,7 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Settings from the system environment relevant to NIPY""" - import os from os.path import join as pjoin diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index b1d187e8c1..13dc059644 100644 --- a/nibabel/eulerangles.py +++ b/nibabel/eulerangles.py @@ -82,7 +82,6 @@ ``y``, followed by rotation around ``x``, is known (confusingly) as "xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles. """ - import math from functools import reduce diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index f2ec992da5..691d31ecff 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Fileholder class""" - from copy import copy from .openers import ImageOpener diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index 77949a6791..c4e47ee72c 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Create filename pairs, triplets etc, with expected extensions""" - import os import pathlib diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index 816f1cdaf6..fe7d6bba54 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -1,5 +1,4 @@ """Utilities for getting array slices out of file-like objects""" - import operator from functools import reduce from mmap import mmap diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index ac27a6ecac..e2dbed129d 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Define supported image classes and names""" - from .analyze import AnalyzeImage from .brikhead import AFNIImage from .cifti2 import Cifti2Image diff --git a/nibabel/imageglobals.py b/nibabel/imageglobals.py index 81a1742809..551719a7ee 100644 --- a/nibabel/imageglobals.py +++ b/nibabel/imageglobals.py @@ -23,7 +23,6 @@ Use ``logger.level = 1`` to see all messages. 
""" - import logging error_level = 40 diff --git a/nibabel/imagestats.py b/nibabel/imagestats.py index 36fbddee0e..38dc9d3f16 100644 --- a/nibabel/imagestats.py +++ b/nibabel/imagestats.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Functions for computing image statistics""" - import numpy as np from nibabel.imageclasses import spatial_axes_first diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 6c1981ca77..f12b81b30b 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # module imports """Utilities to load and save image objects""" - import os import numpy as np diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 9c898b47ba..8d9b81e1f9 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -12,7 +12,6 @@ https://www.nitrc.org/forum/message.php?msg_id=3738 """ - import numpy as np from .analyze import AnalyzeHeader diff --git a/nibabel/openers.py b/nibabel/openers.py index d11c8834a4..5f2bb0cde7 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Context manager openers for various fileobject types""" - import gzip import warnings from bz2 import BZ2File diff --git a/nibabel/orientations.py b/nibabel/orientations.py index f9e1ea028c..075cbd4ffd 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -7,8 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for calculating and applying affine orientations""" - - import numpy as np import numpy.linalg as npl diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 086f2a79d2..22219382c8 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -121,7 +121,6 @@ utility via the option "--strict-sort". The dimension info can be exported to a CSV file by adding the option "--volume-info". """ - import re import warnings from collections import OrderedDict diff --git a/nibabel/processing.py b/nibabel/processing.py index 6027575d47..d634ce7086 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -16,7 +16,6 @@ Smoothing and resampling routines need scipy. """ - import numpy as np import numpy.linalg as npl diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index 9732bc5c63..ec40660607 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -25,7 +25,6 @@ >>> vec = np.array([1, 2, 3]).reshape((3,1)) # column vector >>> tvec = np.dot(M, vec) """ - import math import numpy as np diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py index cb40633e54..625a2af477 100644 --- a/nibabel/rstutils.py +++ b/nibabel/rstutils.py @@ -2,7 +2,6 @@ * Make ReST table given array of values """ - import numpy as np diff --git a/nibabel/spaces.py b/nibabel/spaces.py index d06a39b0ed..e5b87171df 100644 --- a/nibabel/spaces.py +++ b/nibabel/spaces.py @@ -19,7 +19,6 @@ mapping), or * a length 2 sequence with the same information (shape, affine). """ - from itertools import product import numpy as np diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 5138610fe4..f2b32a1fd9 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -3,7 +3,6 @@ Includes version of OrthoSlicer3D code originally written by our own Paul Ivanov. 
""" - import weakref import numpy as np diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 9b47d81381..31637b5e0c 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Thin layer around xml.etree.ElementTree, to abstract nibabel xml support""" - from io import BytesIO from xml.etree.ElementTree import Element, SubElement, tostring # noqa from xml.parsers.expat import ParserCreate From 7903364b86bb4d592e60895f36f2a085379f58b6 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sat, 18 Feb 2023 11:14:36 +0200 Subject: [PATCH 269/702] DOC: Minor docstring formatting fixes to functions --- nibabel/affines.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nibabel/affines.py b/nibabel/affines.py index d6c101ddd5..05fdd7bb58 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -99,7 +99,7 @@ def apply_affine(aff, pts, inplace=False): def to_matvec(transform): - """Split a transform into its matrix and vector components. + """Split a transform into its matrix and vector components The transformation must be represented in homogeneous coordinates and is split into its rotation matrix and translation vector components. @@ -311,8 +311,7 @@ def voxel_sizes(affine): def obliquity(affine): - r""" - Estimate the *obliquity* an affine's axes represent + r"""Estimate the *obliquity* an affine's axes represent The term *obliquity* is defined here as the rotation of those axes with respect to the cardinal axes. From 8c43ffe616fa56df8aca747237411887fcd89435 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 13 Feb 2023 21:31:49 -0500 Subject: [PATCH 270/702] TYP: Annotate openers Opener proxy methods now match io.BufferedIOBase prototypes. Remove some version checks for indexed-gzip < 0.8, which supported Python 3.6 while our minimum is now 3.8. A runtime-checkable protocol for .read()/.write() was the easiest way to accommodate weird file-likes that aren't IOBases. When indexed-gzip is typed, we may need to adjust the output of _gzip_open. --- nibabel/openers.py | 181 +++++++++++++++++++++------------- nibabel/tests/test_openers.py | 2 +- 2 files changed, 116 insertions(+), 67 deletions(-) diff --git a/nibabel/openers.py b/nibabel/openers.py index 5f2bb0cde7..3e3b2fb29f 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -7,34 +7,48 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Context manager openers for various fileobject types""" +from __future__ import annotations + import gzip -import warnings +import io +import typing as ty from bz2 import BZ2File from os.path import splitext -from packaging.version import Version - from nibabel.optpkg import optional_package -# is indexed_gzip present and modern? 
-try: - import indexed_gzip as igzip # type: ignore +if ty.TYPE_CHECKING: # pragma: no cover + from types import TracebackType - version = igzip.__version__ + import pyzstd + from _typeshed import WriteableBuffer - HAVE_INDEXED_GZIP = True + ModeRT = ty.Literal['r', 'rt'] + ModeRB = ty.Literal['rb'] + ModeWT = ty.Literal['w', 'wt'] + ModeWB = ty.Literal['wb'] + ModeR = ty.Union[ModeRT, ModeRB] + ModeW = ty.Union[ModeWT, ModeWB] + Mode = ty.Union[ModeR, ModeW] - # < 0.7 - no good - if Version(version) < Version('0.7.0'): - warnings.warn(f'indexed_gzip is present, but too old (>= 0.7.0 required): {version})') - HAVE_INDEXED_GZIP = False - # >= 0.8 SafeIndexedGzipFile renamed to IndexedGzipFile - elif Version(version) < Version('0.8.0'): - IndexedGzipFile = igzip.SafeIndexedGzipFile - else: - IndexedGzipFile = igzip.IndexedGzipFile - del igzip, version + OpenerDef = tuple[ty.Callable[..., io.IOBase], tuple[str, ...]] +else: + pyzstd = optional_package('pyzstd')[0] + + +@ty.runtime_checkable +class Fileish(ty.Protocol): + def read(self, size: int = -1, /) -> bytes: + ... # pragma: no cover + + def write(self, b: bytes, /) -> int | None: + ... # pragma: no cover + + +try: + from indexed_gzip import IndexedGzipFile # type: ignore + HAVE_INDEXED_GZIP = True except ImportError: # nibabel.openers.IndexedGzipFile is imported by nibabel.volumeutils # to detect compressed file types, so we give a fallback value here. @@ -49,35 +63,63 @@ class DeterministicGzipFile(gzip.GzipFile): to a modification time (``mtime``) of 0 seconds. """ - def __init__(self, filename=None, mode=None, compresslevel=9, fileobj=None, mtime=0): - # These two guards are copied from + def __init__( + self, + filename: str | None = None, + mode: Mode | None = None, + compresslevel: int = 9, + fileobj: io.FileIO | None = None, + mtime: int = 0, + ): + if mode is None: + mode = 'rb' + modestr: str = mode + + # These two guards are adapted from # https://github.com/python/cpython/blob/6ab65c6/Lib/gzip.py#L171-L174 - if mode and 'b' not in mode: - mode += 'b' + if 'b' not in modestr: + modestr = f'{mode}b' if fileobj is None: - fileobj = self.myfileobj = open(filename, mode or 'rb') + if filename is None: + raise TypeError('Must define either fileobj or filename') + # Cast because GzipFile.myfileobj has type io.FileIO while open returns ty.IO + fileobj = self.myfileobj = ty.cast(io.FileIO, open(filename, modestr)) return super().__init__( - filename='', mode=mode, compresslevel=compresslevel, fileobj=fileobj, mtime=mtime + filename='', + mode=modestr, + compresslevel=compresslevel, + fileobj=fileobj, + mtime=mtime, ) -def _gzip_open(filename, mode='rb', compresslevel=9, mtime=0, keep_open=False): +def _gzip_open( + filename: str, + mode: Mode = 'rb', + compresslevel: int = 9, + mtime: int = 0, + keep_open: bool = False, +) -> gzip.GzipFile: + + if not HAVE_INDEXED_GZIP or mode != 'rb': + gzip_file = DeterministicGzipFile(filename, mode, compresslevel, mtime=mtime) # use indexed_gzip if possible for faster read access. If keep_open == # True, we tell IndexedGzipFile to keep the file handle open. Otherwise # the IndexedGzipFile will close/open the file on each read. 
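# Aside: a minimal sketch, not part of this patch, of the behavior that
# DeterministicGzipFile above relies on. gzip embeds a timestamp in its
# header, so pinning mtime (0 by default here) makes repeated writes
# byte-identical:
import gzip
import io

def gz_bytes(payload: bytes, mtime: int) -> bytes:
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb', mtime=mtime) as gz:
        gz.write(payload)  # compress into the in-memory buffer
    return buf.getvalue()

# Same payload, same fixed mtime: identical bytes on every run.
assert gz_bytes(b'data', 0) == gz_bytes(b'data', 0)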
- if HAVE_INDEXED_GZIP and mode == 'rb': - gzip_file = IndexedGzipFile(filename, drop_handles=not keep_open) - - # Fall-back to built-in GzipFile else: - gzip_file = DeterministicGzipFile(filename, mode, compresslevel, mtime=mtime) + gzip_file = IndexedGzipFile(filename, drop_handles=not keep_open) return gzip_file -def _zstd_open(filename, mode='r', *, level_or_option=None, zstd_dict=None): - pyzstd = optional_package('pyzstd')[0] +def _zstd_open( + filename: str, + mode: Mode = 'r', + *, + level_or_option: int | dict | None = None, + zstd_dict: pyzstd.ZstdDict | None = None, +) -> pyzstd.ZstdFile: return pyzstd.ZstdFile(filename, mode, level_or_option=level_or_option, zstd_dict=zstd_dict) @@ -104,7 +146,7 @@ class Opener: gz_def = (_gzip_open, ('mode', 'compresslevel', 'mtime', 'keep_open')) bz2_def = (BZ2File, ('mode', 'buffering', 'compresslevel')) zstd_def = (_zstd_open, ('mode', 'level_or_option', 'zstd_dict')) - compress_ext_map = { + compress_ext_map: dict[str | None, OpenerDef] = { '.gz': gz_def, '.bz2': bz2_def, '.zst': zstd_def, @@ -121,19 +163,19 @@ class Opener: 'w': default_zst_compresslevel, } #: whether to ignore case looking for compression extensions - compress_ext_icase = True + compress_ext_icase: bool = True + + fobj: io.IOBase - def __init__(self, fileish, *args, **kwargs): - if self._is_fileobj(fileish): + def __init__(self, fileish: str | io.IOBase, *args, **kwargs): + if isinstance(fileish, (io.IOBase, Fileish)): self.fobj = fileish self.me_opened = False - self._name = None + self._name = getattr(fileish, 'name', None) return opener, arg_names = self._get_opener_argnames(fileish) # Get full arguments to check for mode and compresslevel - full_kwargs = kwargs.copy() - n_args = len(args) - full_kwargs.update(dict(zip(arg_names[:n_args], args))) + full_kwargs = {**kwargs, **dict(zip(arg_names, args))} # Set default mode if 'mode' not in full_kwargs: mode = 'rb' @@ -155,7 +197,7 @@ def __init__(self, fileish, *args, **kwargs): self._name = fileish self.me_opened = True - def _get_opener_argnames(self, fileish): + def _get_opener_argnames(self, fileish: str) -> OpenerDef: _, ext = splitext(fileish) if self.compress_ext_icase: ext = ext.lower() @@ -168,16 +210,12 @@ def _get_opener_argnames(self, fileish): return self.compress_ext_map[ext] return self.compress_ext_map[None] - def _is_fileobj(self, obj): - """Is `obj` a file-like object?""" - return hasattr(obj, 'read') and hasattr(obj, 'write') - @property - def closed(self): + def closed(self) -> bool: return self.fobj.closed @property - def name(self): + def name(self) -> str | None: """Return ``self.fobj.name`` or self._name if not present self._name will be None if object was created with a fileobj, otherwise @@ -186,42 +224,53 @@ def name(self): return self._name @property - def mode(self): - return self.fobj.mode + def mode(self) -> str: + # Check and raise our own error for type narrowing purposes + if hasattr(self.fobj, 'mode'): + return self.fobj.mode + raise AttributeError(f'{self.fobj.__class__.__name__} has no attribute "mode"') - def fileno(self): + def fileno(self) -> int: return self.fobj.fileno() - def read(self, *args, **kwargs): - return self.fobj.read(*args, **kwargs) + def read(self, size: int = -1, /) -> bytes: + return self.fobj.read(size) - def readinto(self, *args, **kwargs): - return self.fobj.readinto(*args, **kwargs) + def readinto(self, buffer: WriteableBuffer, /) -> int | None: + # Check and raise our own error for type narrowing purposes + if hasattr(self.fobj, 'readinto'): + return 
self.fobj.readinto(buffer) + raise AttributeError(f'{self.fobj.__class__.__name__} has no attribute "readinto"') - def write(self, *args, **kwargs): - return self.fobj.write(*args, **kwargs) + def write(self, b: bytes, /) -> int | None: + return self.fobj.write(b) - def seek(self, *args, **kwargs): - return self.fobj.seek(*args, **kwargs) + def seek(self, pos: int, whence: int = 0, /) -> int: + return self.fobj.seek(pos, whence) - def tell(self, *args, **kwargs): - return self.fobj.tell(*args, **kwargs) + def tell(self, /) -> int: + return self.fobj.tell() - def close(self, *args, **kwargs): - return self.fobj.close(*args, **kwargs) + def close(self, /) -> None: + return self.fobj.close() - def __iter__(self): + def __iter__(self) -> ty.Iterator[bytes]: return iter(self.fobj) - def close_if_mine(self): + def close_if_mine(self) -> None: """Close ``self.fobj`` iff we opened it in the constructor""" if self.me_opened: self.close() - def __enter__(self): + def __enter__(self) -> Opener: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: self.close_if_mine() diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index b4f71f2501..893c5f4f88 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -38,7 +38,7 @@ def __init__(self, message): def write(self): pass - def read(self): + def read(self, size=-1, /): return self.message From ece10ac88ebaa7346e4fdf87fc875cc6aa02ba59 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 13 Feb 2023 23:39:35 -0500 Subject: [PATCH 271/702] TYP: Annotate fileholders --- nibabel/filebasedimages.py | 3 +-- nibabel/fileholders.py | 27 ++++++++++++++++++--------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 6e4ea86135..7e289bfa48 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -16,12 +16,11 @@ from typing import Type from urllib import request -from .fileholders import FileHolder +from .fileholders import FileHolder, FileMap from .filename_parser import TypesFilenamesError, splitext_addext, types_filenames from .openers import ImageOpener FileSpec = ty.Union[str, os.PathLike] -FileMap = ty.Mapping[str, FileHolder] FileSniff = ty.Tuple[bytes, str] ImgT = ty.TypeVar('ImgT', bound='FileBasedImage') diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index 691d31ecff..a27715350d 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -7,6 +7,10 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Fileholder class""" +from __future__ import annotations + +import io +import typing as ty from copy import copy from .openers import ImageOpener @@ -19,7 +23,12 @@ class FileHolderError(Exception): class FileHolder: """class to contain filename, fileobj and file position""" - def __init__(self, filename=None, fileobj=None, pos=0): + def __init__( + self, + filename: str | None = None, + fileobj: io.IOBase | None = None, + pos: int = 0, + ): """Initialize FileHolder instance Parameters @@ -37,7 +46,7 @@ def __init__(self, filename=None, fileobj=None, pos=0): self.fileobj = fileobj self.pos = pos - def get_prepare_fileobj(self, *args, **kwargs): + def get_prepare_fileobj(self, *args, **kwargs) -> ImageOpener: """Return fileobj if present, or return fileobj from filename Set position to that given in self.pos @@ 
-69,7 +78,7 @@ def get_prepare_fileobj(self, *args, **kwargs): raise FileHolderError('No filename or fileobj present') return obj - def same_file_as(self, other): + def same_file_as(self, other: FileHolder) -> bool: """Test if `self` refers to same files / fileobj as `other` Parameters @@ -86,12 +95,15 @@ def same_file_as(self, other): return (self.filename == other.filename) and (self.fileobj == other.fileobj) @property - def file_like(self): + def file_like(self) -> str | io.IOBase | None: """Return ``self.fileobj`` if not None, otherwise ``self.filename``""" return self.fileobj if self.fileobj is not None else self.filename -def copy_file_map(file_map): +FileMap = ty.Mapping[str, FileHolder] + + +def copy_file_map(file_map: FileMap) -> FileMap: r"""Copy mapping of fileholders given by `file_map` Parameters @@ -105,7 +117,4 @@ def copy_file_map(file_map): Copy of `file_map`, using shallow copy of ``FileHolder``\s """ - fm_copy = {} - for key, fh in file_map.items(): - fm_copy[key] = copy(fh) - return fm_copy + return {key: copy(fh) for key, fh in file_map.items()} From d13768f803ed9975c9ea8a3f0d5e82ddf187be03 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 14 Feb 2023 08:50:47 -0500 Subject: [PATCH 272/702] TYP: Annotated filename_parser, move typedefs from filebasedimages --- nibabel/dataobj_images.py | 5 ++- nibabel/filebasedimages.py | 11 ++++--- nibabel/filename_parser.py | 66 +++++++++++++++++++++----------------- nibabel/spatialimages.py | 3 +- 4 files changed, 48 insertions(+), 37 deletions(-) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index f23daf5d8d..eaf341271e 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -15,11 +15,14 @@ from .arrayproxy import ArrayLike from .deprecated import deprecate_with_version -from .filebasedimages import FileBasedHeader, FileBasedImage, FileMap, FileSpec +from .filebasedimages import FileBasedHeader, FileBasedImage +from .fileholders import FileMap if ty.TYPE_CHECKING: # pragma: no cover import numpy.typing as npt + from .filename_parser import FileSpec + ArrayImgT = ty.TypeVar('ArrayImgT', bound='DataobjImage') diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 7e289bfa48..685b11b79b 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -10,17 +10,18 @@ from __future__ import annotations import io -import os import typing as ty from copy import deepcopy from typing import Type from urllib import request from .fileholders import FileHolder, FileMap -from .filename_parser import TypesFilenamesError, splitext_addext, types_filenames +from .filename_parser import TypesFilenamesError, _stringify_path, splitext_addext, types_filenames from .openers import ImageOpener -FileSpec = ty.Union[str, os.PathLike] +if ty.TYPE_CHECKING: # pragma: no cover + from .filename_parser import ExtensionSpec, FileSpec + FileSniff = ty.Tuple[bytes, str] ImgT = ty.TypeVar('ImgT', bound='FileBasedImage') @@ -159,7 +160,7 @@ class FileBasedImage: header_class: Type[FileBasedHeader] = FileBasedHeader _header: FileBasedHeader _meta_sniff_len: int = 0 - files_types: tuple[tuple[str, str | None], ...] = (('image', None),) + files_types: tuple[ExtensionSpec, ...] = (('image', None),) valid_exts: tuple[str, ...] = () _compressed_suffixes: tuple[str, ...] 
= () @@ -410,7 +411,7 @@ def _sniff_meta_for( t_fnames = types_filenames( filename, klass.files_types, trailing_suffixes=klass._compressed_suffixes ) - meta_fname = t_fnames.get('header', filename) + meta_fname = t_fnames.get('header', _stringify_path(filename)) # Do not re-sniff if it would be from the same file if sniff is not None and sniff[1] == meta_fname: diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index c4e47ee72c..45c50d6830 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -7,15 +7,21 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Create filename pairs, triplets etc, with expected extensions""" +from __future__ import annotations + import os -import pathlib +import typing as ty + +if ty.TYPE_CHECKING: # pragma: no cover + FileSpec = str | os.PathLike[str] + ExtensionSpec = tuple[str, str | None] class TypesFilenamesError(Exception): pass -def _stringify_path(filepath_or_buffer): +def _stringify_path(filepath_or_buffer: FileSpec) -> str: """Attempt to convert a path-like object to a string. Parameters @@ -28,30 +34,21 @@ def _stringify_path(filepath_or_buffer): Notes ----- - Objects supporting the fspath protocol (python 3.6+) are coerced - according to its __fspath__ method. - For backwards compatibility with older pythons, pathlib.Path objects - are specially coerced. - Any other object is passed through unchanged, which includes bytes, - strings, buffers, or anything else that's not even path-like. - - Copied from: - https://github.com/pandas-dev/pandas/blob/325dd686de1589c17731cf93b649ed5ccb5a99b4/pandas/io/common.py#L131-L160 + Adapted from: + https://github.com/pandas-dev/pandas/blob/325dd68/pandas/io/common.py#L131-L160 """ - if hasattr(filepath_or_buffer, '__fspath__'): + if isinstance(filepath_or_buffer, os.PathLike): return filepath_or_buffer.__fspath__() - elif isinstance(filepath_or_buffer, pathlib.Path): - return str(filepath_or_buffer) return filepath_or_buffer def types_filenames( - template_fname, - types_exts, - trailing_suffixes=('.gz', '.bz2'), - enforce_extensions=True, - match_case=False, -): + template_fname: FileSpec, + types_exts: ty.Sequence[ExtensionSpec], + trailing_suffixes: ty.Sequence[str] = ('.gz', '.bz2'), + enforce_extensions: bool = True, + match_case: bool = False, +) -> dict[str, str]: """Return filenames with standard extensions from template name The typical case is returning image and header filenames for an @@ -152,12 +149,12 @@ def types_filenames( # we've found .IMG as the extension, we want .HDR as the matching # one. Let's only do this when the extension is all upper or all # lower case. - proc_ext = lambda s: s + proc_ext: ty.Callable[[str], str] = lambda s: s if found_ext: if found_ext == found_ext.upper(): - proc_ext = lambda s: s.upper() + proc_ext = str.upper elif found_ext == found_ext.lower(): - proc_ext = lambda s: s.lower() + proc_ext = str.lower for name, ext in types_exts: if name == direct_set_name: tfns[name] = template_fname @@ -171,7 +168,12 @@ def types_filenames( return tfns -def parse_filename(filename, types_exts, trailing_suffixes, match_case=False): +def parse_filename( + filename: FileSpec, + types_exts: ty.Sequence[ExtensionSpec], + trailing_suffixes: ty.Sequence[str], + match_case: bool = False, +) -> tuple[str, str, str | None, str | None]: """Split filename into fileroot, extension, trailing suffix; guess type. 
Parameters @@ -230,9 +232,9 @@ def parse_filename(filename, types_exts, trailing_suffixes, match_case=False): break guessed_name = None found_ext = None - for name, ext in types_exts: - if ext and endswith(filename, ext): - extpos = -len(ext) + for name, type_ext in types_exts: + if type_ext and endswith(filename, type_ext): + extpos = -len(type_ext) found_ext = filename[extpos:] filename = filename[:extpos] guessed_name = name @@ -242,15 +244,19 @@ def parse_filename(filename, types_exts, trailing_suffixes, match_case=False): return (filename, found_ext, ignored, guessed_name) -def _endswith(whole, end): +def _endswith(whole: str, end: str) -> bool: return whole.endswith(end) -def _iendswith(whole, end): +def _iendswith(whole: str, end: str) -> bool: return whole.lower().endswith(end.lower()) -def splitext_addext(filename, addexts=('.gz', '.bz2', '.zst'), match_case=False): +def splitext_addext( + filename: FileSpec, + addexts: ty.Sequence[str] = ('.gz', '.bz2', '.zst'), + match_case: bool = False, +) -> tuple[str, str, str]: """Split ``/pth/fname.ext.gz`` into ``/pth/fname, .ext, .gz`` where ``.gz`` may be any of passed `addext` trailing suffixes. diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 4f3648c4d6..be347bd86f 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -140,7 +140,8 @@ from .arrayproxy import ArrayLike from .dataobj_images import DataobjImage -from .filebasedimages import FileBasedHeader, FileBasedImage, FileMap +from .filebasedimages import FileBasedHeader, FileBasedImage +from .fileholders import FileMap from .fileslice import canonical_slicers from .orientations import apply_orientation, inv_ornt_aff from .viewers import OrthoSlicer3D From 6df4a95b028a7c7219ac4bff74448f5b50a04b60 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 17 Feb 2023 08:33:29 -0500 Subject: [PATCH 273/702] FIX: Disable direct creation of non-conformant GiftiDataArrays --- nibabel/gifti/gifti.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 326e60fa2e..abaa81c085 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -460,7 +460,21 @@ def __init__( self.data = None if data is None else np.asarray(data) self.intent = intent_codes.code[intent] if datatype is None: - datatype = 'none' if self.data is None else self.data.dtype + if self.data is None: + datatype = 'none' + elif self.data.dtype in ( + np.dtype('uint8'), + np.dtype('int32'), + np.dtype('float32'), + ): + datatype = self.data.dtype + else: + raise ValueError( + f'Data array has type {self.data.dtype}. ' + 'The GIFTI standard only supports uint8, int32 and float32 arrays.\n' + 'Explicitly cast the data array to a supported dtype or pass an ' + 'explicit "datatype" parameter to GiftiDataArray().' 
+ ) self.datatype = data_type_codes.code[datatype] self.encoding = gifti_encoding_codes.code[encoding] self.endian = gifti_endian_codes.code[endian] From b9ef70a41cdaf52d59cd2b73894f9d55443c13d1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 17 Feb 2023 10:25:12 -0500 Subject: [PATCH 274/702] TEST: Validate GiftiDataArray construction wrt types --- nibabel/gifti/tests/test_gifti.py | 32 +++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index cd87bcfeea..96fc23e613 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -195,6 +195,38 @@ def test_dataarray_init(): assert gda(ext_offset=12).ext_offset == 12 +@pytest.mark.parametrize('label', data_type_codes.value_set('label')) +def test_dataarray_typing(label): + dtype = data_type_codes.dtype[label] + code = data_type_codes.code[label] + arr = np.zeros((5,), dtype=dtype) + + # Default interface: accept standards-conformant arrays, reject else + if dtype in ('uint8', 'int32', 'float32'): + assert GiftiDataArray(arr).datatype == code + else: + with pytest.raises(ValueError): + GiftiDataArray(arr) + + # Explicit override - permit for now, may want to warn or eventually + # error + assert GiftiDataArray(arr, datatype=label).datatype == code + assert GiftiDataArray(arr, datatype=code).datatype == code + # Void is how we say we don't know how to do something, so it's not unique + if dtype != np.dtype('void'): + assert GiftiDataArray(arr, datatype=dtype).datatype == code + + # Side-load data array (as in parsing) + # We will probably always want this to load legacy images, but it's + # probably not ideal to make it easy to silently propagate nonconformant + # arrays + gda = GiftiDataArray() + gda.data = arr + gda.datatype = data_type_codes.code[label] + assert gda.data.dtype == dtype + assert gda.datatype == data_type_codes.code[label] + + def test_labeltable(): img = GiftiImage() assert len(img.labeltable.labels) == 0 From 89d20b2c23b0e8831f9a11a81d78efa372ad6ab4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 17 Feb 2023 11:53:32 -0500 Subject: [PATCH 275/702] TEST: Upgrade to new PRNG interface and cast output when needed --- nibabel/gifti/tests/test_gifti.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 96fc23e613..0341c571e3 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -33,6 +33,8 @@ DATA_FILE6, ) +rng = np.random.default_rng() + def test_agg_data(): surf_gii_img = load(get_test_data('gifti', 'ascii.gii')) @@ -81,7 +83,7 @@ def test_gifti_image(): assert gi.numDA == 0 # Test from numpy numeric array - data = np.random.random((5,)) + data = rng.random(5, dtype=np.float32) da = GiftiDataArray(data) gi.add_gifti_data_array(da) assert gi.numDA == 1 @@ -98,7 +100,7 @@ def test_gifti_image(): # Remove one gi = GiftiImage() - da = GiftiDataArray(np.zeros((5,)), intent=0) + da = GiftiDataArray(np.zeros((5,), np.float32), intent=0) gi.add_gifti_data_array(da) gi.remove_gifti_data_array_by_intent(3) @@ -335,7 +337,7 @@ def test_metadata_list_interface(): def test_gifti_label_rgba(): - rgba = np.random.rand(4) + rgba = rng.random(4) kwargs = dict(zip(['red', 'green', 'blue', 'alpha'], rgba)) gl1 = GiftiLabel(**kwargs) From f2c108477ee3c3b1637c7c6e7876c6f3c4dc96a6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 18 Feb 2023 14:26:32 -0500 Subject: 
[PATCH 276/702] ENH: Enforce GIFTI compatibility at write

---
 nibabel/gifti/gifti.py            | 50 ++++++++++++++++++++++++-------
 nibabel/gifti/tests/test_gifti.py |  2 +-
 2 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py
index abaa81c085..9dc2e42d62 100644
--- a/nibabel/gifti/gifti.py
+++ b/nibabel/gifti/gifti.py
@@ -16,7 +16,8 @@
 import base64
 import sys
 import warnings
-from typing import Type
+from copy import copy
+from typing import Type, cast
 
 import numpy as np
 
@@ -27,6 +28,12 @@
 from ..nifti1 import data_type_codes, intent_codes, xform_codes
 from .util import KIND2FMT, array_index_order_codes, gifti_encoding_codes, gifti_endian_codes
 
+GIFTI_DTYPES = (
+    data_type_codes['NIFTI_TYPE_UINT8'],
+    data_type_codes['NIFTI_TYPE_INT32'],
+    data_type_codes['NIFTI_TYPE_FLOAT32'],
+)
+
 
 class _GiftiMDList(list):
     """List view of GiftiMetaData object that will translate most operations"""
@@ -462,11 +469,7 @@ def __init__(
         if datatype is None:
             if self.data is None:
                 datatype = 'none'
-            elif self.data.dtype in (
-                np.dtype('uint8'),
-                np.dtype('int32'),
-                np.dtype('float32'),
-            ):
+            elif data_type_codes[self.data.dtype] in GIFTI_DTYPES:
                 datatype = self.data.dtype
             else:
                 raise ValueError(
@@ -848,20 +851,45 @@ def _to_xml_element(self):
             GIFTI.append(dar._to_xml_element())
         return GIFTI
 
-    def to_xml(self, enc='utf-8') -> bytes:
+    def to_xml(self, enc='utf-8', *, mode='strict') -> bytes:
         """Return XML corresponding to image content"""
+        if mode == 'strict':
+            if any(arr.datatype not in GIFTI_DTYPES for arr in self.darrays):
+                raise ValueError(
+                    'GiftiImage contains data arrays with invalid data types; '
+                    'use mode="compat" to automatically cast to conforming types'
+                )
+        elif mode == 'compat':
+            darrays = []
+            for arr in self.darrays:
+                if arr.datatype not in GIFTI_DTYPES:
+                    arr = copy(arr)
+                    # TODO: Better typing for recoders
+                    dtype = cast(np.dtype, data_type_codes.dtype[arr.datatype])
+                    if np.issubdtype(dtype, np.floating):
+                        arr.datatype = data_type_codes['float32']
+                    elif np.issubdtype(dtype, np.integer):
+                        arr.datatype = data_type_codes['int32']
+                    else:
+                        raise ValueError(f'Cannot convert {dtype} to float32/int32')
+                darrays.append(arr)
+            gii = copy(self)
+            gii.darrays = darrays
+            return gii.to_xml(enc=enc, mode='strict')
+        elif mode != 'force':
+            raise TypeError(f'Unknown mode {mode}')
         header = b"""<?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE GIFTI SYSTEM "http://www.nitrc.org/frs/download.php/115/gifti.dtd">
 """
         return header + super().to_xml(enc)
 
     # Avoid the indirection of going through to_file_map
-    def to_bytes(self, enc='utf-8'):
-        return self.to_xml(enc=enc)
+    def to_bytes(self, enc='utf-8', *, mode='strict'):
+        return self.to_xml(enc=enc, mode=mode)
 
     to_bytes.__doc__ = SerializableImage.to_bytes.__doc__
 
-    def to_file_map(self, file_map=None, enc='utf-8'):
+    def to_file_map(self, file_map=None, enc='utf-8', *, mode='strict'):
         """Save the current image to the specified file_map
 
         Parameters
@@ -877,7 +905,7 @@ def to_file_map(self, file_map=None, enc='utf-8'):
         if file_map is None:
             file_map = self.file_map
         with file_map['image'].get_prepare_fileobj('wb') as f:
-            f.write(self.to_xml(enc=enc))
+            f.write(self.to_xml(enc=enc, mode=mode))
 
     @classmethod
     def from_file_map(klass, file_map, buffer_size=35000000, mmap=True):
diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py
index 0341c571e3..e7050b93fa 100644
--- a/nibabel/gifti/tests/test_gifti.py
+++ b/nibabel/gifti/tests/test_gifti.py
@@ -505,7 +505,7 @@ def test_darray_dtype_coercion_failures():
         datatype=darray_dtype,
     )
     gii = GiftiImage(darrays=[da])
-    gii_copy = 
GiftiImage.from_bytes(gii.to_bytes()) + gii_copy = GiftiImage.from_bytes(gii.to_bytes(mode='force')) da_copy = gii_copy.darrays[0] assert np.dtype(da_copy.data.dtype) == np.dtype(darray_dtype) assert_array_equal(da_copy.data, da.data) From fead0d5dc7fcbd3f07ad5c589a045b31f658e78f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 18 Feb 2023 14:42:28 -0500 Subject: [PATCH 277/702] DOCTEST: Catch deprecation warning in doctest --- nibabel/gifti/gifti.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 9dc2e42d62..56efa4ea0f 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -88,7 +88,8 @@ def _sanitize(args, kwargs): >>> GiftiMetaData({"key": "val"}) - >>> nvpairs = GiftiNVPairs(name='key', value='val') + >>> with pytest.deprecated_call(): + ... nvpairs = GiftiNVPairs(name='key', value='val') >>> with pytest.warns(FutureWarning): ... GiftiMetaData(nvpairs) From cf9cf150a9f2ddda7848c02c1125e12e3ddaa155 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 19 Feb 2023 16:48:35 -0500 Subject: [PATCH 278/702] TEST: Test write modes --- nibabel/gifti/tests/test_gifti.py | 38 +++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index e7050b93fa..4a7b27ece6 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -128,6 +128,44 @@ def assign_metadata(val): pytest.raises(TypeError, assign_metadata, 'not-a-meta') +@pytest.mark.parametrize('label', data_type_codes.value_set('label')) +def test_image_typing(label): + dtype = data_type_codes.dtype[label] + if dtype == np.void: + return + arr = 127 * rng.random( + 20, + ) + try: + cast = arr.astype(label) + except TypeError: + return + darr = GiftiDataArray(cast, datatype=label) + img = GiftiImage(darrays=[darr]) + + # Force-write always works + force_rt = img.from_bytes(img.to_bytes(mode='force')) + assert np.array_equal(cast, force_rt.darrays[0].data) + + # Compatibility mode does its best + if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.floating): + compat_rt = img.from_bytes(img.to_bytes(mode='compat')) + compat_darr = compat_rt.darrays[0].data + assert np.allclose(cast, compat_darr) + assert compat_darr.dtype in ('uint8', 'int32', 'float32') + else: + with pytest.raises(ValueError): + img.to_bytes(mode='compat') + + # Strict mode either works or fails + if label in ('uint8', 'int32', 'float32'): + strict_rt = img.from_bytes(img.to_bytes(mode='strict')) + assert np.array_equal(cast, strict_rt.darrays[0].data) + else: + with pytest.raises(ValueError): + img.to_bytes(mode='strict') + + def test_dataarray_empty(): # Test default initialization of DataArray null_da = GiftiDataArray() From b400dd547254083b8e27e4f0e87a899bcc6c40c8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 19 Feb 2023 16:53:07 -0500 Subject: [PATCH 279/702] TEST: Capture stdout in some GIFTI tests --- nibabel/gifti/tests/test_gifti.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 4a7b27ece6..d4fddf4049 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -404,13 +404,17 @@ def assign_rgba(gl, val): assert np.all([elem is None for elem in gl4.rgba]) -def test_print_summary(): - for fil in [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6]: - gimg = 
load(fil) - gimg.print_summary() +@pytest.mark.parametrize( + 'fname', [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6] +) +def test_print_summary(fname, capsys): + gimg = load(fname) + gimg.print_summary() + captured = capsys.readouterr() + assert captured.out.startswith('----start----\n') -def test_gifti_coord(): +def test_gifti_coord(capsys): from ..gifti import GiftiCoordSystem gcs = GiftiCoordSystem() @@ -419,6 +423,15 @@ def test_gifti_coord(): # Smoke test gcs.xform = None gcs.print_summary() + captured = capsys.readouterr() + assert captured.out == '\n'.join( + [ + 'Dataspace: NIFTI_XFORM_UNKNOWN', + 'XFormSpace: NIFTI_XFORM_UNKNOWN', + 'Affine Transformation Matrix: ', + ' None\n', + ] + ) gcs.to_xml() From 52336915707341f30492952d701df3a8f8ff6e40 Mon Sep 17 00:00:00 2001 From: Factral Date: Mon, 27 Feb 2023 16:29:58 -0500 Subject: [PATCH 280/702] added import imagestats --- nibabel/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 50dca14515..8b3e90ae1c 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -44,6 +44,7 @@ from . import spm2analyze as spm2 from . import spm99analyze as spm99 from . import streamlines, viewers +from . import imagestats # isort: split From 0427e14650ea3b3d67b1f06e1f417a0fb72e8b9b Mon Sep 17 00:00:00 2001 From: Fabian Date: Mon, 27 Feb 2023 17:26:20 -0500 Subject: [PATCH 281/702] isort fix and pre-commit executed --- nibabel/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 8b3e90ae1c..c08890ac37 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -39,12 +39,11 @@ # module imports from . import analyze as ana -from . import ecat, mriutils +from . import ecat, imagestats, mriutils from . import nifti1 as ni1 from . import spm2analyze as spm2 from . import spm99analyze as spm99 from . import streamlines, viewers -from . import imagestats # isort: split From cd1a39a837b7acacf4519cb5fbf662c586c248d3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 1 Mar 2023 18:14:55 -0500 Subject: [PATCH 282/702] Update nibabel/gifti/tests/test_gifti.py --- nibabel/gifti/tests/test_gifti.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index d4fddf4049..a2f8395cae 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -133,9 +133,7 @@ def test_image_typing(label): dtype = data_type_codes.dtype[label] if dtype == np.void: return - arr = 127 * rng.random( - 20, - ) + arr = 127 * rng.random(20) try: cast = arr.astype(label) except TypeError: From cf43308cb7d2d0df4fc16556503ff008fbb690d0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 2 Mar 2023 10:05:49 -0500 Subject: [PATCH 283/702] TYP: Add a version stub to allow mypy to run without building --- nibabel/_version.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 nibabel/_version.pyi diff --git a/nibabel/_version.pyi b/nibabel/_version.pyi new file mode 100644 index 0000000000..f3c1fd305e --- /dev/null +++ b/nibabel/_version.pyi @@ -0,0 +1,4 @@ +__version__: str +__version_tuple__: tuple[str, ...] +version: str +version_tuple: tuple[str, ...] 
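A minimal sketch of what the stub enables, written as a hypothetical check
script (the file name and variable names are illustrative, not part of
nibabel):

    # check_version.py - run e.g. `mypy check_version.py` without building nibabel
    import nibabel._version as v

    ver: str = v.__version__               # resolved from the .pyi stub
    parts: tuple[str, ...] = v.version_tuple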
From f7a90fe213dce4dfe4b5c93d8b5a736582f89dcf Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 9 Mar 2023 20:52:21 -0500 Subject: [PATCH 284/702] RF: Pull compression detection logic into a central private module --- nibabel/_compression.py | 49 ++++++++++++++++++++++++++++++++++++++ nibabel/filebasedimages.py | 3 ++- nibabel/openers.py | 16 +------------ nibabel/volumeutils.py | 17 +------------ 4 files changed, 53 insertions(+), 32 deletions(-) create mode 100644 nibabel/_compression.py diff --git a/nibabel/_compression.py b/nibabel/_compression.py new file mode 100644 index 0000000000..bf13895c80 --- /dev/null +++ b/nibabel/_compression.py @@ -0,0 +1,49 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Constants and types for dealing transparently with compression""" +from __future__ import annotations + +import bz2 +import gzip +import io +import typing as ty + +from .optpkg import optional_package + +if ty.TYPE_CHECKING: # pragma: no cover + import indexed_gzip # type: ignore + import pyzstd + + HAVE_INDEXED_GZIP = True + HAVE_ZSTD = True +else: + indexed_gzip, HAVE_INDEXED_GZIP, _ = optional_package('indexed_gzip') + pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') + + +# Collections of types for isinstance or exception matching +COMPRESSED_FILE_LIKES: tuple[type[io.IOBase], ...] = ( + bz2.BZ2File, + gzip.GzipFile, +) +COMPRESSION_ERRORS: tuple[type[BaseException], ...] = ( + OSError, # BZ2File + gzip.BadGzipFile, +) + +if HAVE_INDEXED_GZIP: + COMPRESSED_FILE_LIKES += (indexed_gzip.IndexedGzipFile,) + COMPRESSION_ERRORS += (indexed_gzip.ZranError,) + from indexed_gzip import IndexedGzipFile # type: ignore +else: + IndexedGzipFile = gzip.GzipFile + +if HAVE_ZSTD: + COMPRESSED_FILE_LIKES += (pyzstd.ZstdFile,) + COMPRESSION_ERRORS += (pyzstd.ZstdError,) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 685b11b79b..3d1a95c1a4 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -15,6 +15,7 @@ from typing import Type from urllib import request +from ._compression import COMPRESSION_ERRORS from .fileholders import FileHolder, FileMap from .filename_parser import TypesFilenamesError, _stringify_path, splitext_addext, types_filenames from .openers import ImageOpener @@ -421,7 +422,7 @@ def _sniff_meta_for( try: with ImageOpener(meta_fname, 'rb') as fobj: binaryblock = fobj.read(sniff_nbytes) - except (OSError, EOFError): + except COMPRESSION_ERRORS + (OSError, EOFError): return None return (binaryblock, meta_fname) diff --git a/nibabel/openers.py b/nibabel/openers.py index 3e3b2fb29f..90c7774d12 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -15,12 +15,11 @@ from bz2 import BZ2File from os.path import splitext -from nibabel.optpkg import optional_package +from ._compression import HAVE_INDEXED_GZIP, IndexedGzipFile, pyzstd if ty.TYPE_CHECKING: # pragma: no cover from types import TracebackType - import pyzstd from _typeshed import WriteableBuffer ModeRT = ty.Literal['r', 'rt'] @@ -32,8 +31,6 @@ Mode = ty.Union[ModeR, ModeW] OpenerDef = tuple[ty.Callable[..., io.IOBase], tuple[str, ...]] -else: - pyzstd = optional_package('pyzstd')[0] @ty.runtime_checkable @@ -45,17 +42,6 @@ def 
write(self, b: bytes, /) -> int | None: ... # pragma: no cover -try: - from indexed_gzip import IndexedGzipFile # type: ignore - - HAVE_INDEXED_GZIP = True -except ImportError: - # nibabel.openers.IndexedGzipFile is imported by nibabel.volumeutils - # to detect compressed file types, so we give a fallback value here. - IndexedGzipFile = gzip.GzipFile - HAVE_INDEXED_GZIP = False - - class DeterministicGzipFile(gzip.GzipFile): """Deterministic variant of GzipFile diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index d61a41e679..90e5e5ff35 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -9,36 +9,28 @@ """Utility functions for analyze-like formats""" from __future__ import annotations -import gzip import io import sys import typing as ty import warnings -from bz2 import BZ2File from functools import reduce from operator import getitem, mul from os.path import exists, splitext import numpy as np +from ._compression import COMPRESSED_FILE_LIKES from .casting import OK_FLOATS, shared_range from .externals.oset import OrderedSet -from .openers import IndexedGzipFile -from .optpkg import optional_package if ty.TYPE_CHECKING: # pragma: no cover import numpy.typing as npt - import pyzstd - - HAVE_ZSTD = True Scalar = np.number | float K = ty.TypeVar('K') V = ty.TypeVar('V') DT = ty.TypeVar('DT', bound=np.generic) -else: - pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') sys_is_le = sys.byteorder == 'little' native_code = sys_is_le and '<' or '>' @@ -55,13 +47,6 @@ #: default compression level when writing gz and bz2 files default_compresslevel = 1 -#: file-like classes known to hold compressed data -COMPRESSED_FILE_LIKES: tuple[type[io.IOBase], ...] = (gzip.GzipFile, BZ2File, IndexedGzipFile) - -# Enable .zst support if pyzstd installed. -if HAVE_ZSTD: - COMPRESSED_FILE_LIKES = (*COMPRESSED_FILE_LIKES, pyzstd.ZstdFile) - class Recoder: """class to return canonical code(s) from code or aliases From 7cd34ff397911300f06ad5d120b2db006b98cbee Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 11 Mar 2023 08:07:09 -0500 Subject: [PATCH 285/702] TYP: Annotate loadsave --- nibabel/imageclasses.py | 10 +++++++--- nibabel/loadsave.py | 42 ++++++++++++++++++++++++----------------- 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index e2dbed129d..b36131ed94 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -7,9 +7,13 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Define supported image classes and names""" +from __future__ import annotations + from .analyze import AnalyzeImage from .brikhead import AFNIImage from .cifti2 import Cifti2Image +from .dataobj_images import DataobjImage +from .filebasedimages import FileBasedImage from .freesurfer import MGHImage from .gifti import GiftiImage from .minc1 import Minc1Image @@ -21,7 +25,7 @@ from .spm99analyze import Spm99AnalyzeImage # Ordered by the load/save priority. -all_image_classes = [ +all_image_classes: list[type[FileBasedImage]] = [ Nifti1Pair, Nifti1Image, Nifti2Pair, @@ -41,7 +45,7 @@ # Image classes known to require spatial axes to be first in index ordering. # When adding an image class, consider whether the new class should be listed # here. -KNOWN_SPATIAL_FIRST = ( +KNOWN_SPATIAL_FIRST: tuple[type[FileBasedImage], ...] 
= ( Nifti1Pair, Nifti1Image, Nifti2Pair, @@ -55,7 +59,7 @@ ) -def spatial_axes_first(img): +def spatial_axes_first(img: DataobjImage) -> bool: """True if spatial image axes for `img` always precede other axes Parameters diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index f12b81b30b..463a687975 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -8,7 +8,10 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # module imports """Utilities to load and save image objects""" +from __future__ import annotations + import os +import typing as ty import numpy as np @@ -22,7 +25,18 @@ _compressed_suffixes = ('.gz', '.bz2', '.zst') -def _signature_matches_extension(filename): +if ty.TYPE_CHECKING: # pragma: no cover + from .filebasedimages import FileBasedImage + from .filename_parser import FileSpec + + P = ty.ParamSpec('P') + + class Signature(ty.TypedDict): + signature: bytes + format_name: str + + +def _signature_matches_extension(filename: FileSpec) -> tuple[bool, str]: """Check if signature aka magic number matches filename extension. Parameters @@ -42,7 +56,7 @@ def _signature_matches_extension(filename): the empty string otherwise. """ - signatures = { + signatures: dict[str, Signature] = { '.gz': {'signature': b'\x1f\x8b', 'format_name': 'gzip'}, '.bz2': {'signature': b'BZh', 'format_name': 'bzip2'}, '.zst': {'signature': b'\x28\xb5\x2f\xfd', 'format_name': 'ztsd'}, @@ -64,7 +78,7 @@ def _signature_matches_extension(filename): return False, f'File {filename} is not a {format_name} file' -def load(filename, **kwargs): +def load(filename: FileSpec, **kwargs) -> FileBasedImage: r"""Load file given filename, guessing at file type Parameters @@ -126,7 +140,7 @@ def guessed_image_type(filename): raise ImageFileError(f'Cannot work out file type of "{filename}"') -def save(img, filename, **kwargs): +def save(img: FileBasedImage, filename: FileSpec, **kwargs) -> None: r"""Save an image to file adapting format to `filename` Parameters @@ -161,19 +175,17 @@ def save(img, filename, **kwargs): from .nifti1 import Nifti1Image, Nifti1Pair from .nifti2 import Nifti2Image, Nifti2Pair - klass = None - converted = None - + converted: FileBasedImage if type(img) == Nifti1Image and lext in ('.img', '.hdr'): - klass = Nifti1Pair + converted = Nifti1Pair.from_image(img) elif type(img) == Nifti2Image and lext in ('.img', '.hdr'): - klass = Nifti2Pair + converted = Nifti2Pair.from_image(img) elif type(img) == Nifti1Pair and lext == '.nii': - klass = Nifti1Image + converted = Nifti1Image.from_image(img) elif type(img) == Nifti2Pair and lext == '.nii': - klass = Nifti2Image + converted = Nifti2Image.from_image(img) else: # arbitrary conversion - valid_klasses = [klass for klass in all_image_classes if ext in klass.valid_exts] + valid_klasses = [klass for klass in all_image_classes if lext in klass.valid_exts] if not valid_klasses: # if list is empty raise ImageFileError(f'Cannot work out file type of "{filename}"') @@ -186,13 +198,9 @@ def save(img, filename, **kwargs): break except Exception as e: err = e - # ... and if none of them work, raise an error. - if converted is None: + else: raise err - # Here, we either have a klass or a converted image. 
- if converted is None: - converted = klass.from_image(img) converted.to_filename(filename, **kwargs) From 45cdb1cfddf9332ee13e6340744acb63c1b345e2 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 25 Mar 2023 22:43:26 -0400 Subject: [PATCH 286/702] TYP: Annotate header types --- nibabel/analyze.py | 1 + nibabel/brikhead.py | 1 + nibabel/cifti2/cifti2.py | 1 + nibabel/ecat.py | 2 +- nibabel/filebasedimages.py | 1 - nibabel/freesurfer/mghformat.py | 1 + nibabel/minc1.py | 1 + nibabel/minc2.py | 1 + nibabel/nifti1.py | 3 ++- nibabel/parrec.py | 1 + nibabel/spatialimages.py | 1 + nibabel/spm2analyze.py | 1 + nibabel/spm99analyze.py | 1 + 13 files changed, 13 insertions(+), 3 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index d738934fff..e4b0455ce6 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -896,6 +896,7 @@ class AnalyzeImage(SpatialImage): """Class for basic Analyze format image""" header_class: Type[AnalyzeHeader] = AnalyzeHeader + header: AnalyzeHeader _meta_sniff_len = header_class.sizeof_hdr files_types: tuple[tuple[str, str], ...] = (('image', '.img'), ('header', '.hdr')) valid_exts: tuple[str, ...] = ('.img', '.hdr') diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index ee5f766722..6694ff08a5 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -475,6 +475,7 @@ class AFNIImage(SpatialImage): """ header_class = AFNIHeader + header: AFNIHeader valid_exts = ('.brik', '.head') files_types = (('image', '.brik'), ('header', '.head')) _compressed_suffixes = ('.gz', '.bz2', '.Z', '.zst') diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 423dbfbf9d..b41521f0cd 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -1411,6 +1411,7 @@ class Cifti2Image(DataobjImage, SerializableImage): """Class for single file CIFTI-2 format image""" header_class = Cifti2Header + header: Cifti2Header valid_exts = Nifti2Image.valid_exts files_types = Nifti2Image.files_types makeable = False diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 23a58f752e..7f477e4a97 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -751,7 +751,7 @@ class EcatImage(SpatialImage): valid_exts = ('.v',) files_types = (('image', '.v'), ('header', '.v')) - _header: EcatHeader + header: EcatHeader _subheader: EcatSubHeader ImageArrayProxy = EcatImageArrayProxy diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 3d1a95c1a4..daf4e7e0b3 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -159,7 +159,6 @@ class FileBasedImage: """ header_class: Type[FileBasedHeader] = FileBasedHeader - _header: FileBasedHeader _meta_sniff_len: int = 0 files_types: tuple[ExtensionSpec, ...] = (('image', None),) valid_exts: tuple[str, ...] 
= () diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 693025efbe..5dd2660342 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -462,6 +462,7 @@ class MGHImage(SpatialImage, SerializableImage): """Class for MGH format image""" header_class = MGHHeader + header: MGHHeader valid_exts = ('.mgh', '.mgz') # Register that .mgz extension signals gzip compression ImageOpener.compress_ext_map['.mgz'] = ImageOpener.gz_def diff --git a/nibabel/minc1.py b/nibabel/minc1.py index ebc167b0ee..bf3e7e9bbc 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -308,6 +308,7 @@ class Minc1Image(SpatialImage): """ header_class: Type[MincHeader] = Minc1Header + header: MincHeader _meta_sniff_len: int = 4 valid_exts: tuple[str, ...] = ('.mnc',) files_types: tuple[tuple[str, str], ...] = (('image', '.mnc'),) diff --git a/nibabel/minc2.py b/nibabel/minc2.py index cc0cb5e440..e00608eb2f 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -150,6 +150,7 @@ class Minc2Image(Minc1Image): # MINC2 does not do compressed whole files _compressed_suffixes = () header_class = Minc2Header + header: Minc2Header @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 0c824ef6ad..71df391d9d 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1817,7 +1817,8 @@ class Nifti1PairHeader(Nifti1Header): class Nifti1Pair(analyze.AnalyzeImage): """Class for NIfTI1 format image, header pair""" - header_class: Type[Nifti1Header] = Nifti1PairHeader + header_class: type[Nifti1Header] = Nifti1PairHeader + header: Nifti1Header _meta_sniff_len = header_class.sizeof_hdr rw = True diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 22219382c8..ec3fdea711 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -1253,6 +1253,7 @@ class PARRECImage(SpatialImage): """PAR/REC image""" header_class = PARRECHeader + header: PARRECHeader valid_exts = ('.rec', '.par') files_types = (('image', '.rec'), ('header', '.par')) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index be347bd86f..73a5fcf468 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -476,6 +476,7 @@ class SpatialImage(DataobjImage): ImageSlicer: type[SpatialFirstSlicer] = SpatialFirstSlicer _header: SpatialHeader + header: SpatialHeader def __init__( self, diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index b326e7eac0..fff3ecf086 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -128,6 +128,7 @@ class Spm2AnalyzeImage(spm99.Spm99AnalyzeImage): """Class for SPM2 variant of basic Analyze image""" header_class = Spm2AnalyzeHeader + header: Spm2AnalyzeHeader load = Spm2AnalyzeImage.from_filename diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 9c2aa15ed0..9c5becc6f6 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -227,6 +227,7 @@ class Spm99AnalyzeImage(analyze.AnalyzeImage): """Class for SPM99 variant of basic Analyze image""" header_class = Spm99AnalyzeHeader + header: Spm99AnalyzeHeader files_types = (('image', '.img'), ('header', '.hdr'), ('mat', '.mat')) has_affine = True makeable = True From 9f189c6d12535c293b5c5911a50fecc6dba473bc Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 25 Mar 2023 22:44:10 -0400 Subject: [PATCH 287/702] ENH: Drop typing.Type for type --- nibabel/analyze.py | 4 +--- nibabel/filebasedimages.py | 3 +-- nibabel/minc1.py | 3 +-- nibabel/nifti1.py | 5 ++--- 4 files changed, 
5 insertions(+), 10 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index e4b0455ce6..20fdac055a 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -83,8 +83,6 @@ """ from __future__ import annotations -from typing import Type - import numpy as np from .arrayproxy import ArrayProxy @@ -895,7 +893,7 @@ def may_contain_header(klass, binaryblock): class AnalyzeImage(SpatialImage): """Class for basic Analyze format image""" - header_class: Type[AnalyzeHeader] = AnalyzeHeader + header_class: type[AnalyzeHeader] = AnalyzeHeader header: AnalyzeHeader _meta_sniff_len = header_class.sizeof_hdr files_types: tuple[tuple[str, str], ...] = (('image', '.img'), ('header', '.hdr')) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index daf4e7e0b3..42760cccdf 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -12,7 +12,6 @@ import io import typing as ty from copy import deepcopy -from typing import Type from urllib import request from ._compression import COMPRESSION_ERRORS @@ -158,7 +157,7 @@ class FileBasedImage: work. """ - header_class: Type[FileBasedHeader] = FileBasedHeader + header_class: type[FileBasedHeader] = FileBasedHeader _meta_sniff_len: int = 0 files_types: tuple[ExtensionSpec, ...] = (('image', None),) valid_exts: tuple[str, ...] = () diff --git a/nibabel/minc1.py b/nibabel/minc1.py index bf3e7e9bbc..5f8422bc23 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -10,7 +10,6 @@ from __future__ import annotations from numbers import Integral -from typing import Type import numpy as np @@ -307,7 +306,7 @@ class Minc1Image(SpatialImage): load. """ - header_class: Type[MincHeader] = Minc1Header + header_class: type[MincHeader] = Minc1Header header: MincHeader _meta_sniff_len: int = 4 valid_exts: tuple[str, ...] = ('.mnc',) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 71df391d9d..07fb177736 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -14,7 +14,6 @@ import warnings from io import BytesIO -from typing import Type import numpy as np import numpy.linalg as npl @@ -90,8 +89,8 @@ # datatypes not in analyze format, with codes if have_binary128(): # Only enable 128 bit floats if we really have IEEE binary 128 longdoubles - _float128t: Type[np.generic] = np.longdouble - _complex256t: Type[np.generic] = np.longcomplex + _float128t: type[np.generic] = np.longdouble + _complex256t: type[np.generic] = np.longcomplex else: _float128t = np.void _complex256t = np.void From da9133a0499292a77d648db4528c5bb93762209f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 30 Mar 2023 08:40:50 -0400 Subject: [PATCH 288/702] MNT: Update mailmap --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index feabaee746..80c46f385e 100644 --- a/.mailmap +++ b/.mailmap @@ -30,6 +30,7 @@ Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Eric Larson Eric89GXL Eric Larson larsoner +Fabian Perez Fernando Pérez-García Fernando Félix C. Morency Felix C. Morency Félix C. Morency Félix C. 
Morency From 7d2746fde8194b39102b42838bc5ab9574094806 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 30 Mar 2023 08:45:38 -0400 Subject: [PATCH 289/702] MNT: Set minimum importlib_resources, update requirements files --- min-requirements.txt | 2 +- pyproject.toml | 2 +- requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/min-requirements.txt b/min-requirements.txt index 305f16dcbd..e30bc40a2a 100644 --- a/min-requirements.txt +++ b/min-requirements.txt @@ -1,4 +1,4 @@ # Auto-generated by tools/update_requirements.py numpy ==1.19 packaging ==17 -setuptools +importlib_resources ==1.3; python_version < '3.9' diff --git a/pyproject.toml b/pyproject.toml index f944f8e685..1dbc13b43f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,7 @@ requires-python = ">=3.8" dependencies = [ "numpy >=1.19", "packaging >=17", - "importlib_resources; python_version < '3.9'", + "importlib_resources >=1.3; python_version < '3.9'", ] classifiers = [ "Development Status :: 5 - Production/Stable", diff --git a/requirements.txt b/requirements.txt index 1d1e434609..a74639cf81 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ # Auto-generated by tools/update_requirements.py numpy >=1.19 packaging >=17 -setuptools +importlib_resources >=1.3; python_version < '3.9' From c483d98b5d2b14a2ee526c2d5dc6b6961820b4b1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 30 Mar 2023 08:47:01 -0400 Subject: [PATCH 290/702] DOC: Update Zenodo from git history --- .zenodo.json | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 75dea73eed..a436bfd31b 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -73,6 +73,10 @@ "name": "Lee, Gregory R.", "orcid": "0000-0001-8895-2740" }, + { + "name": "Baratz, Zvi", + "orcid": "0000-0001-7159-1387" + }, { "name": "Wang, Hao-Ting", "orcid": "0000-0003-4078-2038" @@ -125,10 +129,6 @@ "name": "Goncalves, Mathias", "orcid": "0000-0002-7252-7771" }, - { - "name": "Baratz, Zvi", - "orcid": "0000-0001-7159-1387" - }, { "affiliation": "Montreal Neurological Institute and Hospital", "name": "Markello, Ross", @@ -229,6 +229,9 @@ { "name": "Amirbekian, Bago" }, + { + "name": "Christian, Horea" + }, { "name": "Nimmo-Smith, Ian" }, @@ -274,6 +277,9 @@ { "name": "Fauber, Bennet" }, + { + "name": "Perez, Fabian" + }, { "name": "Roberts, Jacob" }, From 9e1d82230a34ea1079ab7edb3ec71624029862f7 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 31 Mar 2023 16:48:25 -0400 Subject: [PATCH 291/702] DOC: Update contributor list --- doc/source/index.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/index.rst b/doc/source/index.rst index 701de01362..48db1d31a4 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -123,6 +123,8 @@ contributed code and discussion (in rough order of appearance): * Andrew Van * Jérôme Dockès * Jacob Roberts +* Horea Christian +* Fabian Perez License reprise =============== From 82083e9e8a986f8c94319452e6eb8c230683590a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 31 Mar 2023 16:53:08 -0400 Subject: [PATCH 292/702] DOC: Drop setuptools from listed dependencies, add importlib-resources --- doc/source/installation.rst | 2 +- doc/source/links_names.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/installation.rst b/doc/source/installation.rst index 65a35ea333..b896d2dfc1 100644 --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -86,7 +86,7 @@ 
Requirements * Python_ 3.8 or greater * NumPy_ 1.19 or greater * Packaging_ 17.0 or greater -* Setuptools_ +* importlib-resources_ 1.3 or greater (or Python 3.9+) * SciPy_ (optional, for full SPM-ANALYZE support) * h5py_ (optional, for MINC2 support) * PyDICOM_ 1.0.0 or greater (optional, for DICOM support) diff --git a/doc/source/links_names.txt b/doc/source/links_names.txt index 7fbb27b12e..1ab1242c08 100644 --- a/doc/source/links_names.txt +++ b/doc/source/links_names.txt @@ -114,6 +114,7 @@ .. _python imaging library: https://pypi.python.org/pypi/Pillow .. _h5py: https://www.h5py.org/ .. _packaging: https://packaging.pypa.io +.. _importlib-resources: https://importlib-resources.readthedocs.io/ .. Python imaging projects .. _PyMVPA: http://www.pymvpa.org From 39b15a91791613a96389ef427eb6abf2d859af51 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 3 Apr 2023 08:04:27 -0400 Subject: [PATCH 293/702] DOC: 5.1.0 release notes --- Changelog | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/Changelog b/Changelog index 69e55d1a9c..e5bbac91ae 100644 --- a/Changelog +++ b/Changelog @@ -25,6 +25,48 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. +5.1.0 (Monday 3 April 2023) +=========================== + +New feature release in the 5.1.x series. + +Enhancements +------------ +* Make :mod:`nibabel.imagestats` available with ``import nibabel`` (pr/1208) + (Fabian Perez, reviewed by CM) +* Use symmetric threshold for identifying unit quaternions on qform + calculations (pr/1182) (CM, reviewed by MB) +* Type annotations for :mod:`~nibabel.loadsave` (pr/1213) and + :class:`~nibabel.spatialimages.SpatialImage` APIs (pr/1179), + :mod:`~nibabel.deprecated`, :mod:`~nibabel.deprecator`, + :mod:`~nibabel.onetime` and :mod:`~nibabel.optpkg` modules (pr/1188), + :mod:`~nibabel.volumeutils` (pr/1189), :mod:`~nibabel.filename_parser` and + :mod:`~nibabel.openers` (pr/1197) (CM, reviewed by Zvi Baratz) + +Bug fixes +--------- +* Require explicit overrides to write GIFTI files that contain data arrays + with data types not permitted by the GIFTI standard (pr/1199) (CM, reviewed + by Alexis Thual) + +Maintenance +----------- +* Move compression detection logic into a private ``nibabel._compression`` + module, resolving unexpected errors from pyzstd. (pr/1212) (CM) +* Improved consistency of docstring formatting (pr/1200) (Zvi Baratz, reviewed + by CM) +* Modernized README text (pr/1195) (Zvi Baratz, reviewed by CM) +* Updated README badges to include package distributions (pr/1192) (Horea + Christian, reviewed by CM) +* Removed all dependencies on distutils and setuptools (pr/1190) (CM, + reviewed by Zvi Baratz) +* Add a ``_version.pyi`` stub to allow mypy_ to run without building nibabel + (pr/1210) (CM) + + +.. _mypy: https://mypy.readthedocs.io/ + + 5.0.1 (Sunday 12 February 2023) =============================== From f688a8daf9bbd877a5b762c727fa66c8c68b2f36 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 3 Apr 2023 11:55:53 -0400 Subject: [PATCH 294/702] DOC: Link to logo with full URL --- README.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 45856f6795..77d6f55311 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,8 @@ .. -*- rest -*- .. vim:syntax=rst -.. image:: doc/pics/logo.png +.. Use raw location to ensure image shows up on PyPI +.. 
image:: https://raw.githubusercontent.com/nipy/nibabel/master/doc/pics/logo.png :target: https://nipy.org/nibabel :alt: NiBabel logo From 8bb1b99f33a2ec984e18e125a5c79fa8af77f239 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 4 Apr 2023 13:28:31 -0400 Subject: [PATCH 295/702] FIX: Catch random bad slice when testing image slicing --- nibabel/tests/test_spatialimages.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index b4fc7e21b7..95d3a2a151 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -540,15 +540,15 @@ def test_slicer(self): sliceobj = tuple(np.random.choice(slice_elems, n_elems)) try: sliced_img = img.slicer[sliceobj] - except (IndexError, ValueError): - # Only checking valid slices - pass - else: - sliced_data = in_data[sliceobj] - assert (sliced_data == sliced_img.get_fdata()).all() - assert (sliced_data == sliced_img.dataobj).all() - assert (sliced_data == img.dataobj[sliceobj]).all() - assert (sliced_data == img.get_fdata()[sliceobj]).all() + except (IndexError, ValueError, HeaderDataError): + # Skip invalid slices or images that can't be created + continue + + sliced_data = in_data[sliceobj] + assert np.array_equal(sliced_data, sliced_img.get_fdata()) + assert np.array_equal(sliced_data, sliced_img.dataobj) + assert np.array_equal(sliced_data, img.dataobj[sliceobj]) + assert np.array_equal(sliced_data, img.get_fdata()[sliceobj]) class MmapImageMixin: From 9341c5766505338ac3a77bb33fedf30107ce4869 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 17 Apr 2023 08:54:32 -0400 Subject: [PATCH 296/702] CI: Switch to codecov action --- .github/workflows/misc.yml | 3 --- .github/workflows/pre-release.yml | 5 +++-- .github/workflows/stable.yml | 5 +++-- tools/ci/check.sh | 2 +- tools/ci/submit_coverage.sh | 21 --------------------- 5 files changed, 7 insertions(+), 29 deletions(-) delete mode 100755 tools/ci/submit_coverage.sh diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index ade350aaa7..90645b40eb 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -59,9 +59,6 @@ jobs: - name: Run tests run: tools/ci/check.sh if: ${{ matrix.check != 'skiptests' }} - - name: Submit coverage - run: tools/ci/submit_coverage.sh - if: ${{ always() }} - name: Upload pytest test results uses: actions/upload-artifact@v3 with: diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 9ceb4033ae..630f09d99b 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -87,9 +87,10 @@ jobs: - name: Run tests run: tools/ci/check.sh if: ${{ matrix.check != 'skiptests' }} - - name: Submit coverage - run: tools/ci/submit_coverage.sh + - uses: codecov/codecov-action@v3 if: ${{ always() }} + with: + files: cov.xml - name: Upload pytest test results uses: actions/upload-artifact@v3 with: diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 315534107f..18a30d6d07 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -181,9 +181,10 @@ jobs: - name: Run tests if: ${{ matrix.check != 'skiptests' }} run: tools/ci/check.sh - - name: Submit coverage + - uses: codecov/codecov-action@v3 if: ${{ always() }} - run: tools/ci/submit_coverage.sh + with: + files: cov.xml - name: Upload pytest test results if: ${{ always() && matrix.check == 'test' }} uses: actions/upload-artifact@v3 diff --git 
a/tools/ci/check.sh b/tools/ci/check.sh
index bcb1a934e2..cd90650722 100755
--- a/tools/ci/check.sh
+++ b/tools/ci/check.sh
@@ -23,7 +23,7 @@ elif [ "${CHECK_TYPE}" == "test" ]; then
     mkdir for_testing
     cd for_testing
     cp ../.coveragerc .
-    pytest --doctest-modules --doctest-plus --cov nibabel --cov-report xml \
+    pytest --doctest-modules --doctest-plus --cov nibabel --cov-report xml:../cov.xml \
         --junitxml=test-results.xml -v --pyargs nibabel -n auto
 elif [ "${CHECK_TYPE}" == "typing" ]; then
     mypy nibabel
diff --git a/tools/ci/submit_coverage.sh b/tools/ci/submit_coverage.sh
deleted file mode 100755
index 17bfe3933b..0000000000
--- a/tools/ci/submit_coverage.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-echo Submitting coverage
-
-source tools/ci/activate.sh
-
-set -eu
-
-set -x
-
-COVERAGE_FILE="for_testing/coverage.xml"
-
-if [ -e "$COVERAGE_FILE" ]; then
-    # Pin codecov version to reduce scope for malicious updates
-    python -m pip install "codecov==2.1.11"
-    python -m codecov --file for_testing/coverage.xml
-fi
-
-set +eux
-
-echo Done submitting coverage
From 58271684a8fd406874c9a4549b125601fc25e052 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Thu, 20 Apr 2023 09:53:06 -0400
Subject: [PATCH 297/702] ENH: Catch SVD failure and raise informative HeaderDataError

---
 nibabel/nifti1.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 9bb88e844c..8502ad4fa6 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -1098,7 +1098,10 @@ def set_qform(self, affine, code=None, strip_shears=True):
         # (a subtle requirement of the NIFTI format qform transform)
         # Transform below is polar decomposition, returning the closest
         # orthogonal matrix PR, to input R
-        P, S, Qs = npl.svd(R)
+        try:
+            P, S, Qs = npl.svd(R)
+        except np.linalg.LinAlgError as e:
+            raise HeaderDataError(f'Could not decompose affine:\n{affine}') from e
         PR = np.dot(P, Qs)
         if not strip_shears and not np.allclose(PR, R):
             raise HeaderDataError('Shears in affine and `strip_shears` is False')
From 2157139007d5dc5067b785afbfbe27d10474745a Mon Sep 17 00:00:00 2001
From: Matthew Brett
Date: Sat, 22 Apr 2023 12:45:26 +0100
Subject: [PATCH 298/702] RF: refactor find_private_section

Neater and more readable version of find_private_section; extend tests.
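A minimal usage sketch (the dataset is a made-up example; the calls mirror
the new tests, where `creator` may be an exact str or bytes, or a compiled
regexp):

    import re
    import pydicom
    from nibabel.nicom.utils import find_private_section

    ds = pydicom.dataset.Dataset()  # made-up single-element dataset
    ds.add_new((0x11, 0x10), 'LO', b'some section')
    assert find_private_section(ds, 0x11, 'some section') == 0x1000
    assert find_private_section(ds, 0x11, re.compile('some sec.*')) == 0x1000
    assert find_private_section(ds, 0x11, 'other section') is None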
--- nibabel/nicom/tests/test_utils.py | 54 ++++++++++++++++++++----------- nibabel/nicom/utils.py | 26 ++++++--------- 2 files changed, 45 insertions(+), 35 deletions(-) diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py index 37dbcd7d19..ea3b999fad 100644 --- a/nibabel/nicom/tests/test_utils.py +++ b/nibabel/nicom/tests/test_utils.py @@ -4,7 +4,7 @@ from nibabel.optpkg import optional_package -from ..utils import find_private_section +from ..utils import find_private_section as fps from .test_dicomwrappers import DATA, DATA_PHILIPS pydicom, _, setup_module = optional_package('pydicom') @@ -13,37 +13,53 @@ def test_find_private_section_real(): # Find section containing named private creator information # On real data first - assert find_private_section(DATA, 0x29, 'SIEMENS CSA HEADER') == 0x1000 - assert find_private_section(DATA, 0x29, 'SIEMENS MEDCOM HEADER2') == 0x1100 - assert find_private_section(DATA_PHILIPS, 0x29, 'SIEMENS CSA HEADER') == None - # Make fake datasets + assert fps(DATA, 0x29, 'SIEMENS CSA HEADER') == 0x1000 + assert fps(DATA, 0x29, b'SIEMENS CSA HEADER') == 0x1000 + assert fps(DATA, 0x29, re.compile('SIEMENS CSA HEADER')) == 0x1000 + assert fps(DATA, 0x29, 'NOT A HEADER') is None + assert fps(DATA, 0x29, 'SIEMENS MEDCOM HEADER2') == 0x1100 + assert fps(DATA_PHILIPS, 0x29, 'SIEMENS CSA HEADER') == None + + +def test_find_private_section_fake(): + # Make and test fake datasets ds = pydicom.dataset.Dataset({}) + assert fps(ds, 0x11, 'some section') is None ds.add_new((0x11, 0x10), 'LO', b'some section') - assert find_private_section(ds, 0x11, 'some section') == 0x1000 - ds.add_new((0x11, 0x11), 'LO', b'anther section') + assert fps(ds, 0x11, 'some section') == 0x1000 + ds.add_new((0x11, 0x11), 'LO', b'another section') ds.add_new((0x11, 0x12), 'LO', b'third section') - assert find_private_section(ds, 0x11, 'third section') == 0x1200 - # Wrong 'OB' is acceptable for VM (should be 'LO') + assert fps(ds, 0x11, 'third section') == 0x1200 + # Technically incorrect 'OB' is acceptable for VM (should be 'LO') ds.add_new((0x11, 0x12), 'OB', b'third section') - assert find_private_section(ds, 0x11, 'third section') == 0x1200 + assert fps(ds, 0x11, 'third section') == 0x1200 # Anything else not acceptable ds.add_new((0x11, 0x12), 'PN', b'third section') - assert find_private_section(ds, 0x11, 'third section') is None + assert fps(ds, 0x11, 'third section') is None # The input (DICOM value) can be a string insteal of bytes ds.add_new((0x11, 0x12), 'LO', 'third section') - assert find_private_section(ds, 0x11, 'third section') == 0x1200 + assert fps(ds, 0x11, 'third section') == 0x1200 # Search can be bytes as well as string ds.add_new((0x11, 0x12), 'LO', b'third section') - assert find_private_section(ds, 0x11, b'third section') == 0x1200 + assert fps(ds, 0x11, b'third section') == 0x1200 # Search with string or bytes must be exact - assert find_private_section(ds, 0x11, b'third sectio') is None - assert find_private_section(ds, 0x11, 'hird sectio') is None + assert fps(ds, 0x11, b'third sectio') is None + assert fps(ds, 0x11, 'hird sectio') is None # The search can be a regexp - assert find_private_section(ds, 0x11, re.compile(r'third\Wsectio[nN]')) == 0x1200 + assert fps(ds, 0x11, re.compile(r'third\Wsectio[nN]')) == 0x1200 # No match -> None - assert find_private_section(ds, 0x11, re.compile(r'not third\Wsectio[nN]')) is None + assert fps(ds, 0x11, re.compile(r'not third\Wsectio[nN]')) is None # If there are gaps in the sequence before the one we 
want, that is OK ds.add_new((0x11, 0x13), 'LO', b'near section') - assert find_private_section(ds, 0x11, 'near section') == 0x1300 + assert fps(ds, 0x11, 'near section') == 0x1300 ds.add_new((0x11, 0x15), 'LO', b'far section') - assert find_private_section(ds, 0x11, 'far section') == 0x1500 + assert fps(ds, 0x11, 'far section') == 0x1500 + # More than one match - find the first. + assert fps(ds, 0x11, re.compile('(another|third) section')) == 0x1100 + # The signalling element number must be <= 0xFF + ds = pydicom.dataset.Dataset({}) + ds.add_new((0x11, 0xFF), 'LO', b'some section') + assert fps(ds, 0x11, 'some section') == 0xFF00 + ds = pydicom.dataset.Dataset({}) + ds.add_new((0x11, 0x100), 'LO', b'some section') + assert fps(ds, 0x11, 'some section') is None diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py index 48a010903a..1610c49e9d 100644 --- a/nibabel/nicom/utils.py +++ b/nibabel/nicom/utils.py @@ -27,26 +27,20 @@ def find_private_section(dcm_data, group_no, creator): Returns ------- element_start : int - Element number at which named section starts + Element number at which named section starts. """ - is_regex = hasattr(creator, 'search') - if not is_regex: # assume string / bytes + if hasattr(creator, 'search'): + match_func = lambda x : creator.search(x) + else: # assume string / bytes creator = asstr(creator) - for element in dcm_data: # Assumed ordered by tag (groupno, elno) - grpno, elno = element.tag.group, element.tag.elem - if grpno > group_no: - break - if grpno != group_no: - continue + match_func = lambda x : x == creator + # Group elements assumed ordered by tag (groupno, elno) + for element in dcm_data.group_dataset(group_no): + elno = element.tag.elem if elno > 0xFF: break if element.VR not in ('LO', 'OB'): continue - name = asstr(element.value) - if is_regex: - if creator.search(name) is not None: - return elno * 0x100 - else: # string - needs exact match - if creator == name: - return elno * 0x100 + if match_func(asstr(element.value)): + return elno * 0x100 return None From c7c667813aa5ba3468ba1713d54ad71a8d706e78 Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Sat, 22 Apr 2023 16:07:18 +0100 Subject: [PATCH 299/702] Update nibabel/nicom/utils.py Co-authored-by: Chris Markiewicz --- nibabel/nicom/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py index 1610c49e9d..b93aa51680 100644 --- a/nibabel/nicom/utils.py +++ b/nibabel/nicom/utils.py @@ -30,7 +30,7 @@ def find_private_section(dcm_data, group_no, creator): Element number at which named section starts. 
""" if hasattr(creator, 'search'): - match_func = lambda x : creator.search(x) + match_func = creator.search else: # assume string / bytes creator = asstr(creator) match_func = lambda x : x == creator From 613a7d2655b9cb69b2579001edb7cb14b05986ad Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Sat, 22 Apr 2023 16:13:38 +0100 Subject: [PATCH 300/702] Update nibabel/nicom/utils.py Co-authored-by: Chris Markiewicz --- nibabel/nicom/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py index b93aa51680..022bd0af85 100644 --- a/nibabel/nicom/utils.py +++ b/nibabel/nicom/utils.py @@ -33,7 +33,7 @@ def find_private_section(dcm_data, group_no, creator): match_func = creator.search else: # assume string / bytes creator = asstr(creator) - match_func = lambda x : x == creator + match_func = asstr(creator).__eq__ # Group elements assumed ordered by tag (groupno, elno) for element in dcm_data.group_dataset(group_no): elno = element.tag.elem From 77b7a3379eb942a31271cd04cb519a9c77d164aa Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Sat, 22 Apr 2023 16:24:23 +0100 Subject: [PATCH 301/702] Update nibabel/nicom/utils.py Co-authored-by: Chris Markiewicz --- nibabel/nicom/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py index 022bd0af85..f62bc72c5a 100644 --- a/nibabel/nicom/utils.py +++ b/nibabel/nicom/utils.py @@ -32,7 +32,6 @@ def find_private_section(dcm_data, group_no, creator): if hasattr(creator, 'search'): match_func = creator.search else: # assume string / bytes - creator = asstr(creator) match_func = asstr(creator).__eq__ # Group elements assumed ordered by tag (groupno, elno) for element in dcm_data.group_dataset(group_no): From 57a3add792f26a465d92fe35bc72dc7ba7afcee0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 27 Jun 2023 07:56:59 -0400 Subject: [PATCH 302/702] TEST: Remove potentially unstable argsort from parrec tests --- nibabel/tests/test_parrec.py | 21 +++++++-------------- nibabel/tests/test_scripts.py | 4 ++-- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 0a9d7c7dc2..de81c00397 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -173,6 +173,8 @@ # DTI.PAR values for bvecs DTI_PAR_BVALS = [1000] * 6 + [0, 1000] +# Numpy's argsort can be unstable so write indices manually +DTI_PAR_BVALS_SORT_IDCS = [6, 0, 1, 2, 3, 4, 5, 7] EXAMPLE_IMAGES = [ # Parameters come from load of Philips' conversion to NIfTI @@ -192,15 +194,6 @@ ] -def _shuffle(arr): - """Return a copy of the array with entries shuffled. - - Needed to avoid a bug in np.random.shuffle for numpy 1.7. 
- see: numpy/numpy#4286 - """ - return arr[np.argsort(np.random.randn(len(arr)))] - - def test_top_level_load(): # Test PARREC images can be loaded from nib.load img = top_load(EG_PAR) @@ -332,7 +325,7 @@ def test_sorting_dual_echo_T1(): t1_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order - t1_hdr.image_defs = _shuffle(t1_hdr.image_defs) + np.random.shuffle(t1_hdr.image_defs) sorted_indices = t1_hdr.get_sorted_slice_indices() sorted_echos = t1_hdr.image_defs['echo number'][sorted_indices] @@ -363,7 +356,7 @@ def test_sorting_multiple_echos_and_contrasts(): t1_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order - t1_hdr.image_defs = _shuffle(t1_hdr.image_defs) + np.random.shuffle(t1_hdr.image_defs) sorted_indices = t1_hdr.get_sorted_slice_indices() sorted_slices = t1_hdr.image_defs['slice number'][sorted_indices] @@ -402,7 +395,7 @@ def test_sorting_multiecho_ASL(): asl_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order - asl_hdr.image_defs = _shuffle(asl_hdr.image_defs) + np.random.shuffle(asl_hdr.image_defs) sorted_indices = asl_hdr.get_sorted_slice_indices() sorted_slices = asl_hdr.image_defs['slice number'][sorted_indices] @@ -524,7 +517,7 @@ def test_diffusion_parameters_strict_sort(): dti_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order - dti_hdr.image_defs = _shuffle(dti_hdr.image_defs) + np.random.shuffle(dti_hdr.image_defs) assert dti_hdr.get_data_shape() == (80, 80, 10, 8) assert dti_hdr.general_info['diffusion'] == 1 @@ -533,7 +526,7 @@ def test_diffusion_parameters_strict_sort(): # DTI_PAR_BVECS gives bvecs copied from first slice each vol in DTI.PAR # Permute to match bvec directions to acquisition directions # note that bval sorting occurs prior to bvec sorting - assert_almost_equal(bvecs, DTI_PAR_BVECS[np.ix_(np.argsort(DTI_PAR_BVALS), [2, 0, 1])]) + assert_almost_equal(bvecs, DTI_PAR_BVECS[np.ix_(DTI_PAR_BVALS_SORT_IDCS, [2, 0, 1])]) # Check q vectors assert_almost_equal(dti_hdr.get_q_vectors(), bvals[:, None] * bvecs) diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 9f07b3933b..a61c867d69 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -27,7 +27,7 @@ from ..tmpdirs import InTemporaryDirectory from .nibabel_data import needs_nibabel_data from .scriptrunner import ScriptRunner -from .test_parrec import DTI_PAR_BVALS, DTI_PAR_BVECS +from .test_parrec import DTI_PAR_BVALS, DTI_PAR_BVALS_SORT_IDCS, DTI_PAR_BVECS from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLES from .test_parrec_data import AFF_OFF, BALLS @@ -418,7 +418,7 @@ def test_parrec2nii_with_data(): assert_almost_equal(np.loadtxt('DTI.bvals'), np.sort(DTI_PAR_BVALS)) img = load('DTI.nii') data_sorted = img.get_fdata() - assert_almost_equal(data[..., np.argsort(DTI_PAR_BVALS)], data_sorted) + assert_almost_equal(data[..., DTI_PAR_BVALS_SORT_IDCS], data_sorted) del img # Writes .ordering.csv if requested From c385a533cf39ef935919bf7ceb0ae385b2e3cc17 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 27 Jun 2023 10:33:38 -0400 Subject: [PATCH 303/702] FIX: Use stable argsort --- nibabel/tests/test_parrec.py | 7 ++++--- nibabel/tests/test_scripts.py | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/nibabel/tests/test_parrec.py 
From 97040347f38116ee9ab5122a0090ddf62d8d7110 Mon Sep 17 00:00:00 2001
From: Peter Suter
Date: Tue, 11 Jul 2023 20:16:41 +0200
Subject: [PATCH 304/702] ENH: only warn about invalid Minc2 spacing
 declaration

Accept other values (like `xspacing`), assuming regular spacing.

#1236
---
 nibabel/minc2.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/nibabel/minc2.py b/nibabel/minc2.py
index e00608eb2f..5ad8a8495f 100644
--- a/nibabel/minc2.py
+++ b/nibabel/minc2.py
@@ -25,6 +25,7 @@
     mincstats my_funny.mnc
 """
+import warnings

 import numpy as np

 from .minc1 import Minc1File, Minc1Image, MincError, MincHeader
@@ -58,8 +59,12 @@ def __init__(self, mincfile):
         # We don't currently support irregular spacing
         # https://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Dimension_variable_attributes
         for dim in self._dims:
-            if dim.spacing != b'regular__':
-                raise ValueError('Irregular spacing not supported')
+            if hasattr(dim, 'spacing'):
+                if dim.spacing == b'irregular':
+                    raise ValueError('Irregular spacing not supported')
+                elif dim.spacing != b'regular__':
+                    warnings.warn(f'Invalid spacing declaration: {dim.spacing}; assuming regular')
+
         self._spatial_dims = [name for name in self._dim_names if name.endswith('space')]
         self._image_max = image['image-max']
         self._image_min = image['image-min']

From ff4f855d9a493d9bba85d662b3b2579f82f816ca Mon Sep 17 00:00:00 2001
From: Peter Suter
Date: Tue, 11 Jul 2023 20:43:18 +0200
Subject: [PATCH 305/702] Update nibabel/minc2.py

Co-authored-by: Chris Markiewicz
---
 nibabel/minc2.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/nibabel/minc2.py b/nibabel/minc2.py
index 5ad8a8495f..d02eb6cefc 100644
--- a/nibabel/minc2.py
+++ b/nibabel/minc2.py
@@ -59,11 +59,12 @@ def __init__(self, mincfile):
         # We don't currently support irregular spacing
         # https://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Dimension_variable_attributes
         for dim in self._dims:
-            if hasattr(dim, 'spacing'):
-                if dim.spacing == b'irregular':
-                    raise ValueError('Irregular spacing not supported')
-                elif dim.spacing != b'regular__':
-                    warnings.warn(f'Invalid spacing declaration: {dim.spacing}; assuming regular')
+            # "If this attribute is absent, a value of regular__ should be assumed."
+            spacing = getattr(dim, 'spacing', b'regular__')
+            if spacing == b'irregular':
+                raise ValueError('Irregular spacing not supported')
+            elif spacing != b'regular__':
+                warnings.warn(f'Invalid spacing declaration: {spacing}; assuming regular')

         self._spatial_dims = [name for name in self._dim_names if name.endswith('space')]
         self._image_max = image['image-max']
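[Editor's note: a small sketch of how the spacing check behaves after the two patches
above, using types.SimpleNamespace as a stand-in for an HDF5 dimension object. The
stand-in and the helper name check_spacing are illustrative, not nibabel API:

    import warnings
    from types import SimpleNamespace

    def check_spacing(dim):
        # "If this attribute is absent, a value of regular__ should be assumed."
        spacing = getattr(dim, 'spacing', b'regular__')
        if spacing == b'irregular':
            raise ValueError('Irregular spacing not supported')
        elif spacing != b'regular__':
            warnings.warn(f'Invalid spacing declaration: {spacing}; assuming regular')

    check_spacing(SimpleNamespace())                     # absent -> assumed regular
    check_spacing(SimpleNamespace(spacing=b'xspacing'))  # warns, treated as regular
]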
From 43895ef4af0bd5f141e00effe4fd4dda04dc7217 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Wed, 12 Jul 2023 10:48:10 -0400
Subject: [PATCH 306/702] TEST: Add test file with bad spacing field

---
 nibabel/tests/data/minc2_baddim.mnc | Bin 0 -> 19825 bytes
 nibabel/tests/test_minc2.py         | 10 ++++++++++
 2 files changed, 10 insertions(+)
 create mode 100644 nibabel/tests/data/minc2_baddim.mnc

diff --git a/nibabel/tests/data/minc2_baddim.mnc b/nibabel/tests/data/minc2_baddim.mnc
new file mode 100644
index 0000000000000000000000000000000000000000..c7de97bd5e0c257bece9f6a8b667494f0661ff51
GIT binary patch
literal 19825
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py
index 251393818a..e76cb05ce7 100644
--- a/nibabel/tests/test_minc2.py
+++ b/nibabel/tests/test_minc2.py
@@ -10,6 +10,7 @@
 from os.path import join as pjoin

 import numpy as np
+import pytest

 from .. import minc2
 from ..minc2 import Minc2File, Minc2Image
@@ -121,3 +122,12 @@ class TestMinc2Image(tm2.TestMinc1Image):
     image_class = Minc2Image
     eg_images = (pjoin(data_path, 'small.mnc'),)
     module = minc2
+
+
+def test_bad_diminfo():
+    fname = pjoin(data_path, 'minc2_baddim.mnc')
+    # File has a bad spacing field 'xspace' when it should be
+    # `irregular`, `regular__` or absent (default to regular__).
+    # We interpret an invalid spacing as absent, but warn.
+    with pytest.warns(UserWarning) as w:
+        Minc2Image.from_filename(fname)

From 1402f18e6131aed5e4c62cd98f0d8bd087451c73 Mon Sep 17 00:00:00 2001
From: Peter Suter
Date: Thu, 13 Jul 2023 08:16:47 +0200
Subject: [PATCH 307/702] Update .zenodo.json

---
 .zenodo.json | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.zenodo.json b/.zenodo.json
index a436bfd31b..d79c0cf934 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -377,6 +377,9 @@
     },
     {
       "name": "freec84"
+    },
+    {
+      "name": "Suter, Peter"
     }
   ],
   "keywords": [
From 2bb57b3e8a69a7e4587ab4f2d67842a13188bd8b Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 18 Jul 2023 10:06:28 -0400
Subject: [PATCH 308/702] MAINT: Deprecations

---
 nibabel/__init__.py           | 2 +-
 nibabel/casting.py            | 7 +++++++
 nibabel/nicom/utils.py        | 2 +-
 nibabel/nifti1.py             | 3 +--
 nibabel/streamlines/trk.py    | 2 +-
 nibabel/tests/test_openers.py | 4 ++--
 6 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/nibabel/__init__.py b/nibabel/__init__.py
index c08890ac37..09be1d2792 100644
--- a/nibabel/__init__.py
+++ b/nibabel/__init__.py
@@ -39,7 +39,7 @@

 # module imports
 from . import analyze as ana
-from . import ecat, imagestats, mriutils
+from . import ecat, imagestats, mriutils, orientations
 from . import nifti1 as ni1
 from . import spm2analyze as spm2
 from . import spm99analyze as spm99
diff --git a/nibabel/casting.py b/nibabel/casting.py
index 6232c615b5..35d833940f 100644
--- a/nibabel/casting.py
+++ b/nibabel/casting.py
@@ -796,3 +796,10 @@ def ulp(val=np.float64(1.0)):
         fl2 = info['minexp']
     # 'nmant' value does not include implicit first bit
     return 2 ** (fl2 - info['nmant'])
+
+
+# Ported from np.compat
+def asstr(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py
index f62bc72c5a..ad5e794151 100644
--- a/nibabel/nicom/utils.py
+++ b/nibabel/nicom/utils.py
@@ -1,7 +1,7 @@
 """Utilities for working with DICOM datasets
 """

-from numpy.compat.py3k import asstr
+from nibabel.casting import asstr


 def find_private_section(dcm_data, group_no, creator):
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 07fb177736..1908b9321a 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -17,12 +17,11 @@

 import numpy as np
 import numpy.linalg as npl
-from numpy.compat.py3k import asstr

 from . import analyze  # module import
 from .arrayproxy import get_obj_dtype
 from .batteryrunners import Report
-from .casting import have_binary128
+from .casting import have_binary128, asstr
 from .deprecated import alert_future_error
 from .filebasedimages import ImageFileError, SerializableImage
 from .optpkg import optional_package
diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py
index 4f570a2803..d40cb0ed43 100644
--- a/nibabel/streamlines/trk.py
+++ b/nibabel/streamlines/trk.py
@@ -7,9 +7,9 @@
 import warnings

 import numpy as np
-from numpy.compat.py3k import asstr

 import nibabel as nib
+from nibabel.casting import asstr
 from nibabel.openers import Opener
 from nibabel.orientations import aff2axcodes, axcodes2ornt
 from nibabel.volumeutils import endian_codes, native_code, swapped_code
diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py
index 893c5f4f88..a048660d24 100644
--- a/nibabel/tests/test_openers.py
+++ b/nibabel/tests/test_openers.py
@@ -17,9 +17,9 @@
 from unittest import mock

 import pytest
-from numpy.compat.py3k import asbytes, asstr
 from packaging.version import Version

+from ..casting import asstr
 from ..deprecator import ExpiredDeprecationError
 from ..openers import HAVE_INDEXED_GZIP, BZ2File, DeterministicGzipFile, ImageOpener, Opener
 from ..optpkg import optional_package
@@ -342,7 +342,7 @@ def test_iter():
     for input, does_t in files_to_test:
         with Opener(input, 'wb') as fobj:
             for line in lines:
-                fobj.write(asbytes(line + os.linesep))
+                fobj.write(bytes(line + os.linesep, 'ascii'))
         with Opener(input, 'rb') as fobj:
             for back_line, line in zip(fobj, lines):
                 assert asstr(back_line).rstrip() == line

From 28a96399e9d04690dee9af1c8fc5c2cd668b190a Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 18 Jul 2023 13:09:42 -0400
Subject: [PATCH 309/702] FIX: Decode

---
 nibabel/casting.py            |  7 -------
 nibabel/nicom/utils.py        | 16 ++++++++--------
 nibabel/nifti1.py             |  7 ++++---
 nibabel/streamlines/trk.py    | 20 +++++++++++++++-----
 nibabel/tests/test_openers.py |  3 +--
 5 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/nibabel/casting.py b/nibabel/casting.py
index 35d833940f..6232c615b5 100644
--- a/nibabel/casting.py
+++ b/nibabel/casting.py
@@ -796,10 +796,3 @@ def ulp(val=np.float64(1.0)):
         fl2 = info['minexp']
     # 'nmant' value does not include implicit first bit
     return 2 ** (fl2 - info['nmant'])
-
-
-# Ported from np.compat
-def asstr(s):
-    if isinstance(s, bytes):
-        return s.decode('latin1')
-    return str(s)
diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py
index ad5e794151..0c1182f306 100644
--- a/nibabel/nicom/utils.py
+++ b/nibabel/nicom/utils.py
@@ -1,8 +1,6 @@
 """Utilities for working with DICOM datasets
 """

-from nibabel.casting import asstr
-

 def find_private_section(dcm_data, group_no, creator):
     """Return start element in group `group_no` given creator name `creator`
@@ -19,10 +17,10 @@ def find_private_section(dcm_data, group_no, creator):
        ``tag``, ``VR``, ``value``
     group_no : int
        Group number in which to search
-    creator : str or bytes or regex
-       Name of section - e.g. 'SIEMENS CSA HEADER' - or regex to search for
+    creator : bytes or regex
+       Name of section - e.g. b'SIEMENS CSA HEADER' - or regex to search for
        section name.  Regex used via ``creator.search(element_value)`` where
-       ``element_value`` is the value of the data element.
+       ``element_value`` is the decoded value of the data element.

     Returns
     -------
@@ -31,8 +29,9 @@ def find_private_section(dcm_data, group_no, creator):
     """
     if hasattr(creator, 'search'):
         match_func = creator.search
-    else:  # assume string / bytes
-        match_func = asstr(creator).__eq__
+    else:  # assume bytes
+        creator = creator.decode('latin-1')
+        match_func = creator.__eq__
     # Group elements assumed ordered by tag (groupno, elno)
     for element in dcm_data.group_dataset(group_no):
         elno = element.tag.elem
@@ -40,6 +39,7 @@ def find_private_section(dcm_data, group_no, creator):
             break
         if element.VR not in ('LO', 'OB'):
             continue
-        if match_func(asstr(element.value)):
+        val = element.value.decode('latin-1')
+        if match_func(val):
             return elno * 0x100
     return None
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 1908b9321a..ae43a4f1c6 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -21,7 +21,7 @@
 from . import analyze  # module import
 from .arrayproxy import get_obj_dtype
 from .batteryrunners import Report
-from .casting import have_binary128, asstr
+from .casting import have_binary128
 from .deprecated import alert_future_error
 from .filebasedimages import ImageFileError, SerializableImage
 from .optpkg import optional_package
@@ -1404,7 +1404,7 @@ def get_intent(self, code_repr='label'):
             raise TypeError('repr can be "label" or "code"')
         n_params = len(recoder.parameters[code]) if known_intent else 0
         params = (float(hdr['intent_p%d' % (i + 1)]) for i in range(n_params))
-        name = asstr(hdr['intent_name'].item())
+        name = hdr['intent_name'].item().decode('latin-1')
         return label, tuple(params), name

     def set_intent(self, code, params=(), name='', allow_unknown=False):
@@ -1740,7 +1740,8 @@ def _chk_magic(hdr, fix=False):
         magic = hdr['magic'].item()
         if magic in (hdr.pair_magic, hdr.single_magic):
             return hdr, rep
-        rep.problem_msg = f'magic string "{asstr(magic)}" is not valid'
+        magic = magic.decode('latin-1')
+        rep.problem_msg = f'magic string "{magic}" is not valid'
         rep.problem_level = 45
         if fix:
             rep.fix_msg = 'leaving as is, but future errors are likely'
diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py
index d40cb0ed43..2a4cc61453 100644
--- a/nibabel/streamlines/trk.py
+++ b/nibabel/streamlines/trk.py
@@ -9,7 +9,6 @@
 import numpy as np

 import nibabel as nib
-from nibabel.casting import asstr
 from nibabel.openers import Opener
 from nibabel.orientations import aff2axcodes, axcodes2ornt
 from nibabel.volumeutils import endian_codes, native_code, swapped_code
@@ -180,7 +179,7 @@ def decode_value_from_name(encoded_name):
     value : int
         Value decoded from the name.
     """
-    encoded_name = asstr(encoded_name)
+    encoded_name = encoded_name.decode('latin1')
     if len(encoded_name) == 0:
         return encoded_name, 0

@@ -740,14 +739,25 @@ def __str__(self):
                 vars[attr] = vars[hdr_field]

         nb_scalars = self.header[Field.NB_SCALARS_PER_POINT]
-        scalar_names = [asstr(s) for s in vars['scalar_name'][:nb_scalars] if len(s) > 0]
+        scalar_names = [
+            s.decode('latin-1')
+            for s in vars['scalar_name'][:nb_scalars]
+            if len(s) > 0
+        ]
         vars['scalar_names'] = '\n  '.join(scalar_names)
         nb_properties = self.header[Field.NB_PROPERTIES_PER_STREAMLINE]
-        property_names = [asstr(s) for s in vars['property_name'][:nb_properties] if len(s) > 0]
+        property_names = [
+            s.decode('latin-1')
+            for s in vars['property_name'][:nb_properties]
+            if len(s) > 0
+        ]
         vars['property_names'] = '\n  '.join(property_names)
         # Make all byte strings into strings
         # Fixes recursion error on Python 3.3
-        vars = {k: asstr(v) if hasattr(v, 'decode') else v for k, v in vars.items()}
+        vars = {
+            k: v.decode('latin-1') if hasattr(v, 'decode') else v
+            for k, v in vars.items()
+        }
         return """\
 MAGIC NUMBER: {MAGIC_NUMBER} v.{version}
diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py
index a048660d24..f6efdeef22 100644
--- a/nibabel/tests/test_openers.py
+++ b/nibabel/tests/test_openers.py
@@ -19,7 +19,6 @@
 import pytest
 from packaging.version import Version

-from ..casting import asstr
 from ..deprecator import ExpiredDeprecationError
 from ..openers import HAVE_INDEXED_GZIP, BZ2File, DeterministicGzipFile, ImageOpener, Opener
 from ..optpkg import optional_package
@@ -345,7 +344,7 @@ def test_iter():
             fobj.write(bytes(line + os.linesep, 'ascii'))
         with Opener(input, 'rb') as fobj:
             for back_line, line in zip(fobj, lines):
-                assert asstr(back_line).rstrip() == line
+                assert back_line.decode('latin-1').rstrip() == line
             if not does_t:
                 continue
             with Opener(input, 'rt') as fobj:
From 410b8101addbef1a98f21c3259d835f0aa7669f9 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 18 Jul 2023 13:35:31 -0400
Subject: [PATCH 310/702] FIX: str

---
 nibabel/nicom/utils.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py
index 0c1182f306..21b6507655 100644
--- a/nibabel/nicom/utils.py
+++ b/nibabel/nicom/utils.py
@@ -17,7 +17,7 @@ def find_private_section(dcm_data, group_no, creator):
        ``tag``, ``VR``, ``value``
     group_no : int
        Group number in which to search
-    creator : bytes or regex
+    creator : str or regex
        Name of section - e.g. b'SIEMENS CSA HEADER' - or regex to search for
        section name.  Regex used via ``creator.search(element_value)`` where
        ``element_value`` is the decoded value of the data element.
@@ -29,8 +29,7 @@ def find_private_section(dcm_data, group_no, creator):
     """
     if hasattr(creator, 'search'):
         match_func = creator.search
-    else:  # assume bytes
-        creator = creator.decode('latin-1')
+    else:  # assume str
         match_func = creator.__eq__
     # Group elements assumed ordered by tag (groupno, elno)
     for element in dcm_data.group_dataset(group_no):
@@ -39,7 +38,7 @@ def find_private_section(dcm_data, group_no, creator):
             break
         if element.VR not in ('LO', 'OB'):
             continue
-        val = element.value.decode('latin-1')
+        val = element.value
         if match_func(val):
             return elno * 0x100
     return None

From c8c3cf92a06fb1db11cf9dccd47a33aec7606546 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 18 Jul 2023 13:49:33 -0400
Subject: [PATCH 311/702] FIX: Revert

---
 nibabel/nicom/utils.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py
index 21b6507655..617ff2a28a 100644
--- a/nibabel/nicom/utils.py
+++ b/nibabel/nicom/utils.py
@@ -17,10 +17,10 @@ def find_private_section(dcm_data, group_no, creator):
        ``tag``, ``VR``, ``value``
     group_no : int
        Group number in which to search
-    creator : str or regex
-       Name of section - e.g. b'SIEMENS CSA HEADER' - or regex to search for
+    creator : str or bytes or regex
+       Name of section - e.g. 'SIEMENS CSA HEADER' - or regex to search for
        section name.  Regex used via ``creator.search(element_value)`` where
-       ``element_value`` is the decoded value of the data element.
+       ``element_value`` is the value of the data element.

     Returns
     -------
@@ -29,7 +29,9 @@ def find_private_section(dcm_data, group_no, creator):
     """
     if hasattr(creator, 'search'):
         match_func = creator.search
-    else:  # assume str
+    else:
+        if isinstance(creator, bytes):
+            creator = creator.decode('latin-1')
         match_func = creator.__eq__
     # Group elements assumed ordered by tag (groupno, elno)
     for element in dcm_data.group_dataset(group_no):
@@ -39,6 +41,8 @@ def find_private_section(dcm_data, group_no, creator):
         if element.VR not in ('LO', 'OB'):
             continue
         val = element.value
+        if isinstance(val, bytes):
+            val = val.decode('latin-1')
         if match_func(val):
             return elno * 0x100
     return None

From 2882ef10902a6b5f227e82c07ad0f35212a213c1 Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Wed, 19 Jul 2023 22:04:18 +0200
Subject: [PATCH 312/702] DOC: Fix typos found by codespell

---
 doc/source/dicom/dicom_intro.rst | 10 +++++-----
 doc/source/external/nifti1.h     |  2 +-
 doc/source/old/format_design.txt |  2 +-
 nibabel/cifti2/cifti2_axes.py    |  2 +-
 nibabel/tests/test_loadsave.py   |  2 +-
 nibabel/tests/test_nifti1.py     |  2 +-
 nibabel/tests/test_processing.py |  2 +-
 7 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/doc/source/dicom/dicom_intro.rst b/doc/source/dicom/dicom_intro.rst
index f1508932c6..e618153396 100644
--- a/doc/source/dicom/dicom_intro.rst
+++ b/doc/source/dicom/dicom_intro.rst
@@ -228,22 +228,22 @@ Here is the start of the relevant section from PS 3.5:

   7.8.1 PRIVATE DATA ELEMENT TAGS

-  It is possible that multiple implementors may define Private Elements with the
+  It is possible that multiple implementers may define Private Elements with the
   same (odd) group number. To avoid conflicts, Private Elements shall be assigned
   Private Data Element Tags according to the following rules.

   a) Private Creator Data Elements numbered (gggg,0010-00FF) (gggg is odd) shall
      be used to reserve a block of Elements with Group Number gggg for use by an
-     individual implementor. The implementor shall insert an identification code
+     individual implementer. The implementer shall insert an identification code
      in the first unused (unassigned) Element in this series to reserve a block
      of Private Elements. The VR of the private identification code shall be LO
      (Long String) and the VM shall be equal to 1.

   b) Private Creator Data Element (gggg,0010), is a Type 1 Data Element that
-     identifies the implementor reserving element (gggg,1000-10FF), Private Creator
-     Data Element (gggg,0011) identifies the implementor reserving elements
+     identifies the implementer reserving element (gggg,1000-10FF), Private Creator
+     Data Element (gggg,0011) identifies the implementer reserving elements
      (gggg,1100-11FF), and so on, until Private Creator Data Element (gggg,00FF)
-     identifies the implementor reserving elements (gggg,FF00- FFFF).
+     identifies the implementer reserving elements (gggg,FF00- FFFF).

   c) Encoders of Private Data Elements shall be able to dynamically assign private
      data to any available (unreserved) block(s) within the Private group,
diff --git a/doc/source/external/nifti1.h b/doc/source/external/nifti1.h
index 80066fb347..dce3a88c1a 100644
--- a/doc/source/external/nifti1.h
+++ b/doc/source/external/nifti1.h
@@ -869,7 +869,7 @@ typedef struct { unsigned char r,g,b; } rgb_byte ;
       as a displacement field or vector:
         - dataset must have a 5th dimension
         - intent_code must be NIFTI_INTENT_DISPVECT
-        - dim[5] must be the dimensionality of the displacment
+        - dim[5] must be the dimensionality of the displacement
           vector (e.g., 3 for spatial displacement, 2 for in-plane)
 */
 #define NIFTI_INTENT_DISPVECT  1006   /* specifically for displacements */
diff --git a/doc/source/old/format_design.txt b/doc/source/old/format_design.txt
index 29585866a9..fdbf9419ba 100644
--- a/doc/source/old/format_design.txt
+++ b/doc/source/old/format_design.txt
@@ -13,7 +13,7 @@
 The Image and the Format objects form a `bridge pattern
 <https://en.wikipedia.org/wiki/Bridge_pattern>`_. In the bridge pattern
 diagram, the Image class plays the role of the Abstraction, and the Format
-plays the role of the implementor.
+plays the role of the implementer.

 The Format object provides an interface to the underlying file format.
diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py
index 0c75190f80..bc6069a160 100644
--- a/nibabel/cifti2/cifti2_axes.py
+++ b/nibabel/cifti2/cifti2_axes.py
@@ -59,7 +59,7 @@
 In this very simple case ``bm_cortex`` describes a left cortical surface skipping
 the second out of four vertices. ``bm_thal`` contains all voxels in a 2x2x2 volume.

-Brain structure names automatically get converted to valid CIFTI-2 indentifiers using
+Brain structure names automatically get converted to valid CIFTI-2 identifiers using
 :meth:`BrainModelAxis.to_cifti_brain_structure_name`.
 A 1-dimensional mask will be automatically interpreted as a surface element and a
 3-dimensional mask as a volume element.
diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py
index de1d818039..4071b09f72 100644
--- a/nibabel/tests/test_loadsave.py
+++ b/nibabel/tests/test_loadsave.py
@@ -192,7 +192,7 @@ def test_read_img_data_nifti():
             assert_array_equal(actual_unscaled, read_img_data(img_back, prefer='unscaled'))
             # Check the offset too
             img.header.set_data_offset(1024)
-            # Delete arrays still pointing to file, so Windows can re-use
+            # Delete arrays still pointing to file, so Windows can reuse
             del actual_unscaled, unscaled_back
             img.to_file_map()
             # Write an integer of zeros after
diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py
index 7b7f44fe0b..c7df6911ae 100644
--- a/nibabel/tests/test_nifti1.py
+++ b/nibabel/tests/test_nifti1.py
@@ -1169,7 +1169,7 @@ def test_dynamic_dtype_aliases(self):
                 assert img.get_data_dtype() == alias
                 img_rt = bytesio_round_trip(img)
                 assert img_rt.get_data_dtype() == effective_dt
-                # Seralizing does not finalize the source image
+                # Serializing does not finalize the source image
                 assert img.get_data_dtype() == alias

     def test_static_dtype_aliases(self):
diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py
index ffd1fbff2b..27da6639c0 100644
--- a/nibabel/tests/test_processing.py
+++ b/nibabel/tests/test_processing.py
@@ -222,7 +222,7 @@ def test_resample_from_to(caplog):

 @needs_scipy
 def test_resample_to_output(caplog):
-    # Test routine to sample iamges to output space
+    # Test routine to sample images to output space
     # Image aligned to output axes - no-op
     data = np.arange(24, dtype='int32').reshape((2, 3, 4))
     img = Nifti1Image(data, np.eye(4))

From 8102aa7146ee647fd52544a54525765c135276f7 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 4 Aug 2023 10:21:58 -0400
Subject: [PATCH 313/702] RF: Re-consolidate nifti error message

---
 nibabel/nifti1.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index ae43a4f1c6..c1b0124ebb 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -1740,8 +1740,7 @@ def _chk_magic(hdr, fix=False):
         magic = hdr['magic'].item()
         if magic in (hdr.pair_magic, hdr.single_magic):
             return hdr, rep
-        magic = magic.decode('latin-1')
-        rep.problem_msg = f'magic string "{magic}" is not valid'
+        rep.problem_msg = f'magic string {magic.decode("latin1")!r} is not valid'
         rep.problem_level = 45
         if fix:
             rep.fix_msg = 'leaving as is, but future errors are likely'
From c7fdde50c38029c4cb9366c64c10c3f608d87b30 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 4 Aug 2023 10:22:29 -0400
Subject: [PATCH 314/702] STY: blue

---
 nibabel/streamlines/trk.py | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py
index 2a4cc61453..04ac56a51d 100644
--- a/nibabel/streamlines/trk.py
+++ b/nibabel/streamlines/trk.py
@@ -740,24 +740,17 @@ def __str__(self):

         nb_scalars = self.header[Field.NB_SCALARS_PER_POINT]
         scalar_names = [
-            s.decode('latin-1')
-            for s in vars['scalar_name'][:nb_scalars]
-            if len(s) > 0
+            s.decode('latin-1') for s in vars['scalar_name'][:nb_scalars] if len(s) > 0
         ]
         vars['scalar_names'] = '\n  '.join(scalar_names)
         nb_properties = self.header[Field.NB_PROPERTIES_PER_STREAMLINE]
         property_names = [
-            s.decode('latin-1')
-            for s in vars['property_name'][:nb_properties]
-            if len(s) > 0
+            s.decode('latin-1') for s in vars['property_name'][:nb_properties] if len(s) > 0
         ]
         vars['property_names'] = '\n  '.join(property_names)
         # Make all byte strings into strings
         # Fixes recursion error on Python 3.3
-        vars = {
-            k: v.decode('latin-1') if hasattr(v, 'decode') else v
-            for k, v in vars.items()
-        }
+        vars = {k: v.decode('latin-1') if hasattr(v, 'decode') else v for k, v in vars.items()}
         return """\
 MAGIC NUMBER: {MAGIC_NUMBER} v.{version}

From cd2ba2f08c3650928bc9f482af5c1c523fb87062 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 4 Aug 2023 10:23:32 -0400
Subject: [PATCH 315/702] TEST: Use standard encode/decode

---
 nibabel/tests/test_openers.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py
index f6efdeef22..0d150a145c 100644
--- a/nibabel/tests/test_openers.py
+++ b/nibabel/tests/test_openers.py
@@ -341,10 +341,10 @@ def test_iter():
     for input, does_t in files_to_test:
         with Opener(input, 'wb') as fobj:
             for line in lines:
-                fobj.write(bytes(line + os.linesep, 'ascii'))
+                fobj.write(str.encode(line + os.linesep))
         with Opener(input, 'rb') as fobj:
             for back_line, line in zip(fobj, lines):
-                assert back_line.decode('latin-1').rstrip() == line
+                assert back_line.decode().rstrip() == line
             if not does_t:
                 continue
             with Opener(input, 'rt') as fobj:

From 5bb4d4cd67bf1ff0895f814d06445d6ba4c449ff Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 4 Aug 2023 10:33:07 -0400
Subject: [PATCH 316/702] TEST: Switch to single quotes for expected magic
 errors

---
 nibabel/tests/test_nifti1.py  | 2 +-
 nibabel/tests/test_scripts.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py
index 7b7f44fe0b..1031c6c1aa 100644
--- a/nibabel/tests/test_nifti1.py
+++ b/nibabel/tests/test_nifti1.py
@@ -251,7 +251,7 @@ def test_magic_offset_checks(self):
         fhdr, message, raiser = self.log_chk(hdr, 45)
         assert fhdr['magic'] == b'ooh'
         assert (
-            message == 'magic string "ooh" is not valid; '
+            message == "magic string 'ooh' is not valid; "
             'leaving as is, but future errors are likely'
         )
         # For pairs, any offset is OK, but should be divisible by 16
diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py
index e875065c8d..cc4bb468ad 100644
--- a/nibabel/tests/test_scripts.py
+++ b/nibabel/tests/test_scripts.py
@@ -228,7 +228,7 @@ def test_nib_nifti_dx():
         expected = f"""Picky header check output for "{dirty_hdr}"

 pixdim[0] (qfac) should be 1 (default) or -1
-magic string "" is not valid
+magic string '' is not valid
 sform_code 11776 not valid"""
         # Split strings to remove line endings
         assert stdout == expected

From 2623523bd00560fecb622210dbb87b434edd0f0d Mon Sep 17 00:00:00 2001
From: Mathieu Scheltienne
Date: Mon, 28 Aug 2023 15:35:29 +0200
Subject: [PATCH 317/702] replace np.sctypes with np.core.sctypes

---
 nibabel/casting.py                 |  6 +++---
 nibabel/tests/test_arraywriters.py |  8 ++++----
 nibabel/tests/test_casting.py      | 14 +++++++-------
 nibabel/tests/test_floating.py     |  4 ++--
 nibabel/tests/test_image_api.py    |  2 +-
 nibabel/tests/test_proxy_api.py    |  4 +++-
 nibabel/tests/test_round_trip.py   |  2 +-
 nibabel/tests/test_scaling.py      |  6 +++---
 nibabel/tests/test_spm99analyze.py |  2 +-
 nibabel/tests/test_testing.py      |  2 +-
 nibabel/tests/test_volumeutils.py  | 14 +++++++-------
 nibabel/tests/test_wrapstruct.py   |  2 +-
 12 files changed, 34 insertions(+), 32 deletions(-)

diff --git a/nibabel/casting.py b/nibabel/casting.py
index 6232c615b5..7172d1cbf7 100644
--- a/nibabel/casting.py
+++ b/nibabel/casting.py
@@ -714,7 +714,7 @@ def ok_floats():
     Remove longdouble if it has no higher precision than float64
     """
     # copy float list so we don't change the numpy global
-    floats = np.sctypes['float'][:]
+    floats = np.core.sctypes['float'][:]
     if best_float() != np.longdouble and np.longdouble in floats:
         floats.remove(np.longdouble)
     return sorted(floats, key=lambda f: type_info(f)['nmant'])
@@ -750,10 +750,10 @@ def able_int_type(values):
     mn = min(values)
     mx = max(values)
     if mn >= 0:
-        for ityp in np.sctypes['uint']:
+        for ityp in np.core.sctypes['uint']:
             if mx <= np.iinfo(ityp).max:
                 return ityp
-    for ityp in np.sctypes['int']:
+    for ityp in np.core.sctypes['int']:
         info = np.iinfo(ityp)
         if mn >= info.min and mx <= info.max:
             return ityp
diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py
index e77c2fd11f..68f661dbe5 100644
--- a/nibabel/tests/test_arraywriters.py
+++ b/nibabel/tests/test_arraywriters.py
@@ -24,10 +24,10 @@
 from ..testing import assert_allclose_safely, suppress_warnings
 from ..volumeutils import _dt_min_max, apply_read_scaling, array_from_file

-FLOAT_TYPES = np.sctypes['float']
-COMPLEX_TYPES = np.sctypes['complex']
-INT_TYPES = np.sctypes['int']
-UINT_TYPES = np.sctypes['uint']
+FLOAT_TYPES = np.core.sctypes['float']
+COMPLEX_TYPES = np.core.sctypes['complex']
+INT_TYPES = np.core.sctypes['int']
+UINT_TYPES = np.core.sctypes['uint']
 CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES
 IUINT_TYPES = INT_TYPES + UINT_TYPES
 NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES
diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py
index a082394b7b..54e1fccaa4 100644
--- a/nibabel/tests/test_casting.py
+++ b/nibabel/tests/test_casting.py
@@ -24,8 +24,8 @@


 def test_shared_range():
-    for ft in np.sctypes['float']:
-        for it in np.sctypes['int'] + np.sctypes['uint']:
+    for ft in np.core.sctypes['float']:
+        for it in np.core.sctypes['int'] + np.core.sctypes['uint']:
             # Test that going a bit above or below the calculated min and max
             # either generates the same number when cast, or the max int value
             # (if this system generates that) or something smaller (because of
@@ -54,7 +54,7 @@ def test_shared_range():
                 assert np.all((bit_bigger == casted_mx) | (bit_bigger == imax))
             else:
                 assert np.all(bit_bigger <= casted_mx)
-            if it in np.sctypes['uint']:
+            if it in np.core.sctypes['uint']:
                 assert mn == 0
                 continue
             # And something larger for the minimum
@@ -90,8 +90,8 @@ def test_shared_range_inputs():


 def test_casting():
-    for ft in np.sctypes['float']:
-        for it in np.sctypes['int'] + np.sctypes['uint']:
+    for ft in np.core.sctypes['float']:
+        for it in np.core.sctypes['int'] + np.core.sctypes['uint']:
             ii = np.iinfo(it)
             arr = [ii.min - 1, ii.max + 1, -np.inf, np.inf, np.nan, 0.2, 10.6]
             farr_orig = np.array(arr, dtype=ft)
@@ -140,7 +140,7 @@ def test_casting():


 def test_int_abs():
-    for itype in np.sctypes['int']:
+    for itype in np.core.sctypes['int']:
         info = np.iinfo(itype)
         in_arr = np.array([info.min, info.max], dtype=itype)
         idtype = np.dtype(itype)
@@ -188,7 +188,7 @@ def test_able_int_type():

 def test_able_casting():
     # Check the able_int_type function guesses numpy out type
-    types = np.sctypes['int'] + np.sctypes['uint']
+    types = np.core.sctypes['int'] + np.core.sctypes['uint']
     for in_type in types:
         in_info = np.iinfo(in_type)
         in_mn, in_mx = in_info.min, in_info.max
diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py
index a06c180b84..e26e6a403f 100644
--- a/nibabel/tests/test_floating.py
+++ b/nibabel/tests/test_floating.py
@@ -43,7 +43,7 @@ def dtt2dict(dtt):

 def test_type_info():
     # Test routine to get min, max, nmant, nexp
-    for dtt in np.sctypes['int'] + np.sctypes['uint']:
+    for dtt in np.core.sctypes['int'] + np.core.sctypes['uint']:
         info = np.iinfo(dtt)
         infod = type_info(dtt)
         assert infod == dict(
@@ -212,7 +212,7 @@ def test_int_to_float():
 def test_as_int_np_fix():
     # Test as_int works for integers.  We need as_int for integers because of a
     # numpy 1.4.1 bug such that int(np.uint32(2**32-1) == -1
-    for t in np.sctypes['int'] + np.sctypes['uint']:
+    for t in np.core.sctypes['int'] + np.core.sctypes['uint']:
         info = np.iinfo(t)
         mn, mx = np.array([info.min, info.max], dtype=t)
         assert (mn, mx) == (as_int(mn), as_int(mx))
diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py
index 091bc57e8c..a57720b588 100644
--- a/nibabel/tests/test_image_api.py
+++ b/nibabel/tests/test_image_api.py
@@ -403,7 +403,7 @@ def _check_array_caching(self, imaker, meth_name, caching):
             return
         # Return original array from get_fdata only if the input array is the
         # requested dtype.
-        float_types = np.sctypes['float']
+        float_types = np.core.sctypes['float']
         if arr_dtype not in float_types:
             return
         for float_type in float_types:
diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py
index 1c9e02186c..5aa3eef7d5 100644
--- a/nibabel/tests/test_proxy_api.py
+++ b/nibabel/tests/test_proxy_api.py
@@ -146,7 +146,9 @@ def validate_array_interface_with_dtype(self, pmaker, params):
             context.__enter__()
             warnings.simplefilter('ignore', np.ComplexWarning)

-        for dtype in np.sctypes['float'] + np.sctypes['int'] + np.sctypes['uint']:
+        for dtype in (
+            np.core.sctypes["float"] + np.core.sctypes["int"] + np.core.sctypes["uint"]
+        ):
             # Directly coerce with a dtype
             direct = dtype(prox)
             # Half-precision is imprecise. Obviously. It's a bad idea, but don't break
diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py
index cb754d0b54..5dc4ee8c8e 100644
--- a/nibabel/tests/test_round_trip.py
+++ b/nibabel/tests/test_round_trip.py
@@ -102,7 +102,7 @@ def test_round_trip():
     rng = np.random.RandomState(20111121)
     N = 10000
     sd_10s = range(-20, 51, 5)
-    iuint_types = np.sctypes['int'] + np.sctypes['uint']
+    iuint_types = np.core.sctypes['int'] + np.core.sctypes['uint']
     # Remove types which cannot be set into nifti header datatype
     nifti_supported = supported_np_types(Nifti1Header())
     iuint_types = [t for t in iuint_types if t in nifti_supported]
diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py
index 2fbe88a1a7..0d0cbf47b9 100644
--- a/nibabel/tests/test_scaling.py
+++ b/nibabel/tests/test_scaling.py
@@ -177,8 +177,8 @@ def test_array_file_scales(in_type, out_type):
     ],
 )
 def test_scaling_in_abstract(category0, category1, overflow):
-    for in_type in np.sctypes[category0]:
-        for out_type in np.sctypes[category1]:
+    for in_type in np.core.sctypes[category0]:
+        for out_type in np.core.sctypes[category1]:
             if overflow:
                 with suppress_warnings():
                     check_int_a2f(in_type, out_type)
@@ -191,7 +191,7 @@ def check_int_a2f(in_type, out_type):
     big_floater = np.maximum_sctype(np.float64)
     info = type_info(in_type)
     this_min, this_max = info['min'], info['max']
-    if not in_type in np.sctypes['complex']:
+    if not in_type in np.core.sctypes['complex']:
         data = np.array([this_min, this_max], in_type)
         # Bug in numpy 1.6.2 on PPC leading to infs - abort
         if not np.all(np.isfinite(data)):
diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py
index a8756e3013..9a3531d49c 100644
--- a/nibabel/tests/test_spm99analyze.py
+++ b/nibabel/tests/test_spm99analyze.py
@@ -35,7 +35,7 @@
 from ..volumeutils import _dt_min_max, apply_read_scaling
 from . import test_analyze

-# np.sctypes values are lists of types with unique sizes
+# np.core.sctypes values are lists of types with unique sizes
 # For testing, we want all concrete classes of a type
 # Key on kind, rather than abstract base classes, since timedelta64 is a signedinteger
 sctypes = {}
diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py
index 8cd70e37a9..ec3ec95004 100644
--- a/nibabel/tests/test_testing.py
+++ b/nibabel/tests/test_testing.py
@@ -48,7 +48,7 @@ def test_assert_allclose_safely():
     with pytest.raises(AssertionError):
         assert_allclose_safely(a, b)
     # Test allcloseness of inf, especially np.float128 infs
-    for dtt in np.sctypes['float']:
+    for dtt in np.core.sctypes['float']:
         a = np.array([-np.inf, 1, np.inf], dtype=dtt)
         b = np.array([-np.inf, 1, np.inf], dtype=dtt)
         assert_allclose_safely(a, b)
diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py
index ab5bd38ee6..fef51ec296 100644
--- a/nibabel/tests/test_volumeutils.py
+++ b/nibabel/tests/test_volumeutils.py
@@ -60,11 +60,11 @@
 pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd')

 #: convenience variables for numpy types
-FLOAT_TYPES = np.sctypes['float']
-COMPLEX_TYPES = np.sctypes['complex']
+FLOAT_TYPES = np.core.sctypes['float']
+COMPLEX_TYPES = np.core.sctypes['complex']
 CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES
-INT_TYPES = np.sctypes['int']
-IUINT_TYPES = INT_TYPES + np.sctypes['uint']
+INT_TYPES = np.core.sctypes['int']
+IUINT_TYPES = INT_TYPES + np.core.sctypes['uint']
 NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES

 FP_RUNTIME_WARN = Version(np.__version__) >= Version('1.24.0.dev0+239')
@@ -597,7 +597,7 @@ def test_a2f_nanpos():

 def test_a2f_bad_scaling():
     # Test that pathological scalers raise an error
-    NUMERICAL_TYPES = sum([np.sctypes[key] for key in ['int', 'uint', 'float', 'complex']], [])
+    NUMERICAL_TYPES = sum([np.core.sctypes[key] for key in ['int', 'uint', 'float', 'complex']], [])
     for in_type, out_type, slope, inter in itertools.product(
         NUMERICAL_TYPES,
         NUMERICAL_TYPES,
@@ -830,10 +830,10 @@ def check_against(f1, f2):
         return f1 if FLOAT_TYPES.index(f1) >= FLOAT_TYPES.index(f2) else f2

     for first in FLOAT_TYPES:
-        for other in IUINT_TYPES + np.sctypes['complex']:
+        for other in IUINT_TYPES + np.core.sctypes['complex']:
             assert better_float_of(first, other) == first
             assert better_float_of(other, first) == first
-            for other2 in IUINT_TYPES + np.sctypes['complex']:
+            for other2 in IUINT_TYPES + np.core.sctypes['complex']:
                 assert better_float_of(other, other2) == np.float32
                 assert better_float_of(other, other2, np.float64) == np.float64
     for second in FLOAT_TYPES:
diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py
index 70f22894ad..73f19e894d 100644
--- a/nibabel/tests/test_wrapstruct.py
+++ b/nibabel/tests/test_wrapstruct.py
@@ -36,7 +36,7 @@
 from ..volumeutils import Recoder, native_code, swapped_code
 from ..wrapstruct import LabeledWrapStruct, WrapStruct, WrapStructError

-INTEGER_TYPES = np.sctypes['int'] + np.sctypes['uint']
+INTEGER_TYPES = np.core.sctypes['int'] + np.core.sctypes['uint']


 def log_chk(hdr, level):
From d5501527d0f45f8ed5501360022e9b247f8e04dc Mon Sep 17 00:00:00 2001
From: Mathieu Scheltienne
Date: Mon, 28 Aug 2023 15:35:35 +0200
Subject: [PATCH 318/702] rm unused imports

---
 nibabel/tests/test_arraywriters.py | 2 +-
 nibabel/tests/test_casting.py      | 2 +-
 nibabel/tests/test_spm99analyze.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py
index 68f661dbe5..007c47240b 100644
--- a/nibabel/tests/test_arraywriters.py
+++ b/nibabel/tests/test_arraywriters.py
@@ -20,7 +20,7 @@
     get_slope_inter,
     make_array_writer,
 )
-from ..casting import int_abs, on_powerpc, shared_range, type_info
+from ..casting import int_abs, shared_range, type_info
 from ..testing import assert_allclose_safely, suppress_warnings
 from ..volumeutils import _dt_min_max, apply_read_scaling, array_from_file

diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py
index 54e1fccaa4..2e7592523c 100644
--- a/nibabel/tests/test_casting.py
+++ b/nibabel/tests/test_casting.py
@@ -5,7 +5,7 @@

 import numpy as np
 import pytest
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import assert_array_equal

 from ..casting import (
     CastingError,
diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py
index 9a3531d49c..ccc1a80329 100644
--- a/nibabel/tests/test_spm99analyze.py
+++ b/nibabel/tests/test_spm99analyze.py
@@ -24,7 +24,7 @@
 needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available')

 from ..casting import shared_range, type_info
-from ..spatialimages import HeaderDataError, supported_np_types
+from ..spatialimages import HeaderDataError
 from ..spm99analyze import HeaderTypeError, Spm99AnalyzeHeader, Spm99AnalyzeImage
 from ..testing import (
     assert_allclose_safely,

From 6e873c616aaeba421f022980c0da593a560d8f49 Mon Sep 17 00:00:00 2001
From: Mathieu Scheltienne
Date: Mon, 28 Aug 2023 17:29:31 +0200
Subject: [PATCH 319/702] run blue instead of black

---
 nibabel/tests/test_proxy_api.py   | 4 +---
 nibabel/tests/test_volumeutils.py | 4 +++-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py
index 5aa3eef7d5..4032f05c61 100644
--- a/nibabel/tests/test_proxy_api.py
+++ b/nibabel/tests/test_proxy_api.py
@@ -146,9 +146,7 @@ def validate_array_interface_with_dtype(self, pmaker, params):
             context.__enter__()
             warnings.simplefilter('ignore', np.ComplexWarning)

-        for dtype in (
-            np.core.sctypes["float"] + np.core.sctypes["int"] + np.core.sctypes["uint"]
-        ):
+        for dtype in np.core.sctypes['float'] + np.core.sctypes['int'] + np.core.sctypes['uint']:
             # Directly coerce with a dtype
             direct = dtype(prox)
             # Half-precision is imprecise. Obviously. It's a bad idea, but don't break
diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py
index fef51ec296..2281820835 100644
--- a/nibabel/tests/test_volumeutils.py
+++ b/nibabel/tests/test_volumeutils.py
@@ -597,7 +597,9 @@ def test_a2f_nanpos():

 def test_a2f_bad_scaling():
     # Test that pathological scalers raise an error
-    NUMERICAL_TYPES = sum([np.core.sctypes[key] for key in ['int', 'uint', 'float', 'complex']], [])
+    NUMERICAL_TYPES = sum(
+        [np.core.sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []
+    )
     for in_type, out_type, slope, inter in itertools.product(
         NUMERICAL_TYPES,
         NUMERICAL_TYPES,

From 0f5ad6efae3017de06d906a7a31858e4a7926011 Mon Sep 17 00:00:00 2001
From: Mathieu Scheltienne
Date: Mon, 28 Aug 2023 17:33:02 +0200
Subject: [PATCH 320/702] add author entries

---
 .zenodo.json         | 5 +++++
 doc/source/index.rst | 1 +
 2 files changed, 6 insertions(+)

diff --git a/.zenodo.json b/.zenodo.json
index d79c0cf934..b96c102349 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -381,6 +381,11 @@
     {
       "name": "Suter, Peter"
     }
+    {
+      "affiliation": "Human Neuroscience Platform, Fondation Campus Biotech Geneva, Geneva, Switzerland",
+      "name": "Mathieu Scheltienne",
+      "orcid": "0000-0001-8316-7436"
+    },
   ],
   "keywords": [
     "neuroimaging"
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 48db1d31a4..65e1aded4c 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -125,6 +125,7 @@ contributed code and discussion (in rough order of appearance):
 * Jacob Roberts
 * Horea Christian
 * Fabian Perez
+* Mathieu Scheltienne

 License reprise
 ===============

From 1dac328d53bea77b904588814496a8da7ef89555 Mon Sep 17 00:00:00 2001
From: Mathieu Scheltienne
Date: Tue, 29 Aug 2023 13:24:18 +0200
Subject: [PATCH 321/702] manually define the mapping between str and scalar
 types in casting.py

---
 nibabel/casting.py                 | 15 ++++++++++++---
 nibabel/tests/test_arraywriters.py | 10 +++++-----
 nibabel/tests/test_casting.py      | 15 ++++++++-------
 nibabel/tests/test_floating.py     |  5 +++--
 nibabel/tests/test_image_api.py    |  2 +-
 nibabel/tests/test_proxy_api.py    |  4 ++--
 nibabel/tests/test_round_trip.py   |  4 ++--
 nibabel/tests/test_scaling.py      |  8 ++++----
 nibabel/tests/test_testing.py      |  2 +-
 nibabel/tests/test_volumeutils.py  | 18 +++++++++---------
 nibabel/tests/test_wrapstruct.py   |  3 ++-
 11 files changed, 49 insertions(+), 37 deletions(-)

diff --git a/nibabel/casting.py b/nibabel/casting.py
index 7172d1cbf7..8adbe28307 100644
--- a/nibabel/casting.py
+++ b/nibabel/casting.py
@@ -23,6 +23,15 @@ class CastingError(Exception):
 _test_val = 2**63 + 2**11  # Should be exactly representable in float64
 TRUNC_UINT64 = np.float64(_test_val).astype(np.uint64) != _test_val

+# np.sctypes is deprecated in numpy 2.0 and np.core.sctypes should not be used instead.
+sctypes = {
+    "int": [np.int8, np.int16, np.int32, np.int64],
+    "uint": [np.uint8, np.uint16, np.uint32, np.uint64],
+    "float": [np.float16, np.float32, np.float64, np.float128],
+    "complex": [np.complex64, np.complex128, np.complex256],
+    "others": [bool, object, bytes, str, np.void],
+}
+

 def float_to_int(arr, int_type, nan2zero=True, infmax=False):
     """Convert floating point array `arr` to type `int_type`
@@ -714,7 +723,7 @@ def ok_floats():
     Remove longdouble if it has no higher precision than float64
     """
     # copy float list so we don't change the numpy global
-    floats = np.core.sctypes['float'][:]
+    floats = sctypes['float'][:]
     if best_float() != np.longdouble and np.longdouble in floats:
         floats.remove(np.longdouble)
     return sorted(floats, key=lambda f: type_info(f)['nmant'])
@@ -750,10 +759,10 @@ def able_int_type(values):
     mn = min(values)
     mx = max(values)
     if mn >= 0:
-        for ityp in np.core.sctypes['uint']:
+        for ityp in sctypes['uint']:
             if mx <= np.iinfo(ityp).max:
                 return ityp
-    for ityp in np.core.sctypes['int']:
+    for ityp in sctypes['int']:
         info = np.iinfo(ityp)
         if mn >= info.min and mx <= info.max:
             return ityp
diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py
index 007c47240b..b0cace66a2 100644
--- a/nibabel/tests/test_arraywriters.py
+++ b/nibabel/tests/test_arraywriters.py
@@ -20,14 +20,14 @@
     get_slope_inter,
     make_array_writer,
 )
-from ..casting import int_abs, shared_range, type_info
+from ..casting import int_abs, sctypes, shared_range, type_info
 from ..testing import assert_allclose_safely, suppress_warnings
 from ..volumeutils import _dt_min_max, apply_read_scaling, array_from_file

-FLOAT_TYPES = np.core.sctypes['float']
-COMPLEX_TYPES = np.core.sctypes['complex']
-INT_TYPES = np.core.sctypes['int']
-UINT_TYPES = np.core.sctypes['uint']
+FLOAT_TYPES = sctypes['float']
+COMPLEX_TYPES = sctypes['complex']
+INT_TYPES = sctypes['int']
+UINT_TYPES = sctypes['uint']
 CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES
 IUINT_TYPES = INT_TYPES + UINT_TYPES
 NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES
diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py
index 2e7592523c..d04b996bb6 100644
--- a/nibabel/tests/test_casting.py
+++ b/nibabel/tests/test_casting.py
@@ -17,6 +17,7 @@
     int_abs,
     int_to_float,
     longdouble_precision_improved,
+    sctypes,
     shared_range,
     ulp,
 )
@@ -25,8 +26,8 @@


 def test_shared_range():
-    for ft in np.core.sctypes['float']:
-        for it in np.core.sctypes['int'] + np.core.sctypes['uint']:
+    for ft in sctypes['float']:
+        for it in sctypes['int'] + sctypes['uint']:
             # Test that going a bit above or below the calculated min and max
             # either generates the same number when cast, or the max int value
             # (if this system generates that) or something smaller (because of
@@ -54,7 +55,7 @@ def test_shared_range():
                 assert np.all((bit_bigger == casted_mx) | (bit_bigger == imax))
             else:
                 assert np.all(bit_bigger <= casted_mx)
-            if it in np.core.sctypes['uint']:
+            if it in sctypes['uint']:
                 assert mn == 0
                 continue
             # And something larger for the minimum
@@ -90,8 +91,8 @@ def test_shared_range_inputs():


 def test_casting():
-    for ft in np.core.sctypes['float']:
-        for it in np.core.sctypes['int'] + np.core.sctypes['uint']:
+    for ft in sctypes['float']:
+        for it in sctypes['int'] + sctypes['uint']:
             ii = np.iinfo(it)
             arr = [ii.min - 1, ii.max + 1, -np.inf, np.inf, np.nan, 0.2, 10.6]
             farr_orig = np.array(arr, dtype=ft)
@@ -140,7 +141,7 @@ def test_casting():


 def test_int_abs():
-    for itype in np.core.sctypes['int']:
+    for itype in sctypes['int']:
         info = np.iinfo(itype)
         in_arr = np.array([info.min, info.max], dtype=itype)
         idtype = np.dtype(itype)
@@ -188,7 +189,7 @@ def test_able_int_type():

 def test_able_casting():
     # Check the able_int_type function guesses numpy out type
-    types = np.core.sctypes['int'] + np.core.sctypes['uint']
+    types = sctypes['int'] + sctypes['uint']
     for in_type in types:
         in_info = np.iinfo(in_type)
         in_mn, in_mx = in_info.min, in_info.max
diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py
index e26e6a403f..73e2ed5cc4 100644
--- a/nibabel/tests/test_floating.py
+++ b/nibabel/tests/test_floating.py
@@ -18,6 +18,7 @@
     longdouble_precision_improved,
     ok_floats,
     on_powerpc,
+    sctypes,
     type_info,
 )
 from ..testing import suppress_warnings
@@ -44,7 +45,7 @@ def dtt2dict(dtt):

 def test_type_info():
     # Test routine to get min, max, nmant, nexp
-    for dtt in np.core.sctypes['int'] + np.core.sctypes['uint']:
+    for dtt in sctypes['int'] + sctypes['uint']:
         info = np.iinfo(dtt)
         infod = type_info(dtt)
         assert infod == dict(
@@ -212,7 +213,7 @@ def test_int_to_float():
 def test_as_int_np_fix():
     # Test as_int works for integers.  We need as_int for integers because of a
     # numpy 1.4.1 bug such that int(np.uint32(2**32-1) == -1
-    for t in np.core.sctypes['int'] + np.core.sctypes['uint']:
+    for t in sctypes['int'] + sctypes['uint']:
         info = np.iinfo(t)
         mn, mx = np.array([info.min, info.max], dtype=t)
         assert (mn, mx) == (as_int(mn), as_int(mx))
diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py
index a57720b588..cb39ee747f 100644
--- a/nibabel/tests/test_image_api.py
+++ b/nibabel/tests/test_image_api.py
@@ -403,7 +403,7 @@ def _check_array_caching(self, imaker, meth_name, caching):
             return
         # Return original array from get_fdata only if the input array is the
         # requested dtype.
-        float_types = np.core.sctypes['float']
+        float_types = sctypes['float']
         if arr_dtype not in float_types:
             return
         for float_type in float_types:
diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py
index 4032f05c61..004d447e35 100644
--- a/nibabel/tests/test_proxy_api.py
+++ b/nibabel/tests/test_proxy_api.py
@@ -41,8 +41,8 @@
 from .. import ecat, minc1, minc2, parrec
 from ..analyze import AnalyzeHeader
 from ..arrayproxy import ArrayProxy, is_proxy
-from ..casting import have_binary128
 from ..deprecator import ExpiredDeprecationError
+from ..casting import have_binary128, sctypes
 from ..externals.netcdf import netcdf_file
 from ..freesurfer.mghformat import MGHHeader
 from ..nifti1 import Nifti1Header
@@ -146,7 +146,7 @@ def validate_array_interface_with_dtype(self, pmaker, params):
             context.__enter__()
             warnings.simplefilter('ignore', np.ComplexWarning)

-        for dtype in np.core.sctypes['float'] + np.core.sctypes['int'] + np.core.sctypes['uint']:
+        for dtype in sctypes['float'] + sctypes['int'] + sctypes['uint']:
             # Directly coerce with a dtype
             direct = dtype(prox)
             # Half-precision is imprecise. Obviously. It's a bad idea, but don't break
diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py
index 5dc4ee8c8e..07783fe550 100644
--- a/nibabel/tests/test_round_trip.py
+++ b/nibabel/tests/test_round_trip.py
@@ -10,7 +10,7 @@

 from .. import Nifti1Header, Nifti1Image
 from ..arraywriters import ScalingError
-from ..casting import best_float, type_info, ulp
+from ..casting import best_float, sctypes, type_info, ulp
 from ..spatialimages import HeaderDataError, supported_np_types

 DEBUG = False
@@ -102,7 +102,7 @@ def test_round_trip():
     rng = np.random.RandomState(20111121)
     N = 10000
     sd_10s = range(-20, 51, 5)
-    iuint_types = np.core.sctypes['int'] + np.core.sctypes['uint']
+    iuint_types = sctypes['int'] + sctypes['uint']
     # Remove types which cannot be set into nifti header datatype
     nifti_supported = supported_np_types(Nifti1Header())
     iuint_types = [t for t in iuint_types if t in nifti_supported]
diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py
index 0d0cbf47b9..6cde5a5aa1 100644
--- a/nibabel/tests/test_scaling.py
+++ b/nibabel/tests/test_scaling.py
@@ -15,7 +15,7 @@
 import pytest
 from numpy.testing import assert_array_almost_equal, assert_array_equal

-from ..casting import type_info
+from ..casting import sctypes, type_info
 from ..testing import suppress_warnings
 from ..volumeutils import apply_read_scaling, array_from_file, array_to_file, finite_range
 from .test_volumeutils import _calculate_scale
@@ -177,8 +177,8 @@ def test_array_file_scales(in_type, out_type):
     ],
 )
 def test_scaling_in_abstract(category0, category1, overflow):
-    for in_type in np.core.sctypes[category0]:
-        for out_type in np.core.sctypes[category1]:
+    for in_type in sctypes[category0]:
+        for out_type in sctypes[category1]:
             if overflow:
                 with suppress_warnings():
                     check_int_a2f(in_type, out_type)
@@ -191,7 +191,7 @@ def check_int_a2f(in_type, out_type):
     big_floater = np.maximum_sctype(np.float64)
     info = type_info(in_type)
     this_min, this_max = info['min'], info['max']
-    if not in_type in np.core.sctypes['complex']:
+    if not in_type in sctypes['complex']:
         data = np.array([this_min, this_max], in_type)
         # Bug in numpy 1.6.2 on PPC leading to infs - abort
         if not np.all(np.isfinite(data)):
diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py
index ec3ec95004..e97f53d5c1 100644
--- a/nibabel/tests/test_testing.py
+++ b/nibabel/tests/test_testing.py
@@ -48,7 +48,7 @@ def test_assert_allclose_safely():
     with pytest.raises(AssertionError):
         assert_allclose_safely(a, b)
     # Test allcloseness of inf, especially np.float128 infs
-    for dtt in np.core.sctypes['float']:
+    for dtt in sctypes['float']:
         a = np.array([-np.inf, 1, np.inf], dtype=dtt)
         b = np.array([-np.inf, 1, np.inf], dtype=dtt)
         assert_allclose_safely(a, b)
diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py
index 2281820835..06e2c4c766 100644
--- a/nibabel/tests/test_volumeutils.py
+++ b/nibabel/tests/test_volumeutils.py
@@ -32,7 +32,7 @@
     suppress_warnings,
 )

-from ..casting import OK_FLOATS, floor_log2, shared_range, type_info
+from ..casting import OK_FLOATS, floor_log2, sctypes, shared_range, type_info
 from ..openers import BZ2File, ImageOpener, Opener
 from ..optpkg import optional_package
 from ..tmpdirs import InTemporaryDirectory
@@ -59,12 +59,12 @@

 pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd')

-#: convenience variables for numpy types
-FLOAT_TYPES = np.core.sctypes['float']
-COMPLEX_TYPES = np.core.sctypes['complex']
+# convenience variables for numpy types
+FLOAT_TYPES = sctypes['float']
+COMPLEX_TYPES = sctypes['complex']
 CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES
-INT_TYPES = np.core.sctypes['int']
-IUINT_TYPES = INT_TYPES + np.core.sctypes['uint']
+INT_TYPES = sctypes['int']
+IUINT_TYPES = INT_TYPES + sctypes['uint']
 NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES

 FP_RUNTIME_WARN = Version(np.__version__) >= Version('1.24.0.dev0+239')
@@ -598,7 +598,7 @@ def test_a2f_nanpos():
 def test_a2f_bad_scaling():
     # Test that pathological scalers raise an error
     NUMERICAL_TYPES = sum(
-        [np.core.sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []
+        [sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []
     )
     for in_type, out_type, slope, inter in itertools.product(
         NUMERICAL_TYPES,
@@ -832,10 +832,10 @@ def check_against(f1, f2):
         return f1 if FLOAT_TYPES.index(f1) >= FLOAT_TYPES.index(f2) else f2

     for first in FLOAT_TYPES:
-        for other in IUINT_TYPES + np.core.sctypes['complex']:
+        for other in IUINT_TYPES + sctypes['complex']:
             assert better_float_of(first, other) == first
             assert better_float_of(other, first) == first
-            for other2 in IUINT_TYPES + np.core.sctypes['complex']:
+            for other2 in IUINT_TYPES + sctypes['complex']:
                 assert better_float_of(other, other2) == np.float32
                 assert better_float_of(other, other2, np.float64) == np.float64
     for second in FLOAT_TYPES:
diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py
index 73f19e894d..3d08f01149 100644
--- a/nibabel/tests/test_wrapstruct.py
+++ b/nibabel/tests/test_wrapstruct.py
@@ -31,12 +31,13 @@
 from numpy.testing import assert_array_equal

 from .. import imageglobals
+from ..casting import sctypes
 from ..batteryrunners import Report
 from ..spatialimages import HeaderDataError
 from ..volumeutils import Recoder, native_code, swapped_code
 from ..wrapstruct import LabeledWrapStruct, WrapStruct, WrapStructError

-INTEGER_TYPES = np.core.sctypes['int'] + np.core.sctypes['uint']
+INTEGER_TYPES = sctypes['int'] + sctypes['uint']


 def log_chk(hdr, level):
import ecat, minc1, minc2, parrec from ..analyze import AnalyzeHeader from ..arrayproxy import ArrayProxy, is_proxy -from ..deprecator import ExpiredDeprecationError from ..casting import have_binary128, sctypes from ..externals.netcdf import netcdf_file from ..freesurfer.mghformat import MGHHeader diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index 6cde5a5aa1..e1c350b003 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -13,7 +13,7 @@ import numpy as np import pytest -from numpy.testing import assert_array_almost_equal, assert_array_equal +from numpy.testing import assert_array_equal from ..casting import sctypes, type_info from ..testing import suppress_warnings From 07cea85313bfb0486b9be59d6364d8c5cb2cbe32 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 29 Aug 2023 13:24:37 +0200 Subject: [PATCH 323/702] rm unused variable definition --- nibabel/tests/test_image_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index c3e44a7e05..9abb1a313f 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -172,7 +172,7 @@ def validate_filenames(self, imaker, params): for path in (fname, pathlib.Path(fname)): with InTemporaryDirectory(): # Validate that saving or loading a file doesn't use deprecated methods internally - with clear_and_catch_warnings() as w: + with clear_and_catch_warnings(): warnings.filterwarnings( 'error', category=DeprecationWarning, module=r'nibabel.*' ) From d3d23db01913360acafaa2010c21c8a34d49c572 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 29 Aug 2023 13:27:38 +0200 Subject: [PATCH 324/702] fix blue --- nibabel/casting.py | 10 +++++----- nibabel/tests/test_volumeutils.py | 4 +--- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 8adbe28307..4109860502 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -25,11 +25,11 @@ class CastingError(Exception): # np.sctypes is deprecated in numpy 2.0 and np.core.sctypes should not be used instead. 
sctypes = { - "int": [np.int8, np.int16, np.int32, np.int64], - "uint": [np.uint8, np.uint16, np.uint32, np.uint64], - "float": [np.float16, np.float32, np.float64, np.float128], - "complex": [np.complex64, np.complex128, np.complex256], - "others": [bool, object, bytes, str, np.void], + 'int': [np.int8, np.int16, np.int32, np.int64], + 'uint': [np.uint8, np.uint16, np.uint32, np.uint64], + 'float': [np.float16, np.float32, np.float64, np.float128], + 'complex': [np.complex64, np.complex128, np.complex256], + 'others': [bool, object, bytes, str, np.void], } diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 06e2c4c766..59a5f1989f 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -597,9 +597,7 @@ def test_a2f_nanpos(): def test_a2f_bad_scaling(): # Test that pathological scalers raise an error - NUMERICAL_TYPES = sum( - [sctypes[key] for key in ['int', 'uint', 'float', 'complex']], [] - ) + NUMERICAL_TYPES = sum([sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []) for in_type, out_type, slope, inter in itertools.product( NUMERICAL_TYPES, NUMERICAL_TYPES, From ac9e16f2c1d17812099000e5a0ff9c82155a2a63 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 29 Aug 2023 15:55:42 +0200 Subject: [PATCH 325/702] fix missing import --- nibabel/tests/test_testing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index e97f53d5c1..dee3ea3554 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -8,6 +8,7 @@ import numpy as np import pytest +from ..casting import sctypes from ..testing import ( assert_allclose_safely, assert_re_in, From 6c30a847a9f56f997fc2d312ff0bf64fa71acfe2 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 29 Aug 2023 16:27:09 +0200 Subject: [PATCH 326/702] try without using the sized aliases --- nibabel/casting.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 4109860502..68b2e253cb 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -25,10 +25,10 @@ class CastingError(Exception): # np.sctypes is deprecated in numpy 2.0 and np.core.sctypes should not be used instead. sctypes = { - 'int': [np.int8, np.int16, np.int32, np.int64], - 'uint': [np.uint8, np.uint16, np.uint32, np.uint64], - 'float': [np.float16, np.float32, np.float64, np.float128], - 'complex': [np.complex64, np.complex128, np.complex256], + 'int': list(set([np.byte, np.short, np.intc, np.longlong])), + 'uint': list(set([np.ubyte, np.ushort, np.uintc, np.ulonglong])), + 'float': list(set([np.half, np.single, np.double, np.longdouble])), + 'complex': list(set([np.csingle, np.cdouble, np.clongdouble])), 'others': [bool, object, bytes, str, np.void], } From 3639711bfffc948a009c1cd0266630668891ea81 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 29 Aug 2023 17:01:00 +0200 Subject: [PATCH 327/702] Revert "try without using the sized aliases" This reverts commit 6c30a847a9f56f997fc2d312ff0bf64fa71acfe2. --- nibabel/casting.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 68b2e253cb..4109860502 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -25,10 +25,10 @@ class CastingError(Exception): # np.sctypes is deprecated in numpy 2.0 and np.core.sctypes should not be used instead. 
sctypes = { - 'int': list(set([np.byte, np.short, np.intc, np.longlong])), - 'uint': list(set([np.ubyte, np.ushort, np.uintc, np.ulonglong])), - 'float': list(set([np.half, np.single, np.double, np.longdouble])), - 'complex': list(set([np.csingle, np.cdouble, np.clongdouble])), + 'int': [np.int8, np.int16, np.int32, np.int64], + 'uint': [np.uint8, np.uint16, np.uint32, np.uint64], + 'float': [np.float16, np.float32, np.float64, np.float128], + 'complex': [np.complex64, np.complex128, np.complex256], 'others': [bool, object, bytes, str, np.void], } From 4cdba01dc41fa26717f576baadaebd27b952f361 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 29 Aug 2023 17:02:02 +0200 Subject: [PATCH 328/702] try with sized aliases again and np.longdouble instead of np.float128 --- nibabel/casting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 4109860502..229013512f 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -27,7 +27,7 @@ class CastingError(Exception): sctypes = { 'int': [np.int8, np.int16, np.int32, np.int64], 'uint': [np.uint8, np.uint16, np.uint32, np.uint64], - 'float': [np.float16, np.float32, np.float64, np.float128], + 'float': [np.float16, np.float32, np.float64, np.longdouble], 'complex': [np.complex64, np.complex128, np.complex256], 'others': [bool, object, bytes, str, np.void], } From 2398cc35e84ed4f39f20e2b593d79fd09972ac7b Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 29 Aug 2023 19:43:02 +0200 Subject: [PATCH 329/702] use combination of getattr and hasattr, include float96 and complex192 to the list --- nibabel/casting.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 229013512f..4184d69dcc 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -25,10 +25,24 @@ class CastingError(Exception): # np.sctypes is deprecated in numpy 2.0 and np.core.sctypes should not be used instead. 
sctypes = { - 'int': [np.int8, np.int16, np.int32, np.int64], - 'uint': [np.uint8, np.uint16, np.uint32, np.uint64], - 'float': [np.float16, np.float32, np.float64, np.longdouble], - 'complex': [np.complex64, np.complex128, np.complex256], + 'int': [ + getattr(np, dtype) for dtype in ('int8', 'int16', 'int32', 'int64') if hasattr(np, dtype) + ], + 'uint': [ + getattr(np, dtype) + for dtype in ('uint8', 'uint16', 'uint32', 'uint64') + if hasattr(np, dtype) + ], + 'float': [ + getattr(np, dtype) + for dtype in ('float16', 'float32', 'float64', 'float96', 'float128') + if hasattr(np, dtype) + ], + 'complex': [ + getattr(np, dtype) + for dtype in ('complex64', 'complex128', 'complex192', 'complex256') + if hasattr(np, dtype) + ], 'others': [bool, object, bytes, str, np.void], } From 3c94f8cd804b219cce006d5db8bb2e78a2c3ee69 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 5 Sep 2023 22:29:00 -0400 Subject: [PATCH 330/702] CI: Avoid Python 3.11.4 for unpacking sdist --- .github/workflows/stable.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 18a30d6d07..564af6ca34 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -40,7 +40,8 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v4 with: - python-version: 3 + # Bug in 3.11.4 tarfile extraction can break python -m build + python-version: '>=3, != 3.11.4' - run: pip install --upgrade build twine - name: Build sdist and wheel run: python -m build @@ -79,7 +80,8 @@ jobs: path: archive/ - uses: actions/setup-python@v4 with: - python-version: 3 + # Bug in 3.11.4 tarfile extraction may break sdist installation + python-version: '>=3, != 3.11.4' - name: Display Python version run: python -c "import sys; print(sys.version)" - name: Update pip From 184335580fcaedec64e5d5e14e8104cfe552e260 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 6 Sep 2023 09:30:35 -0400 Subject: [PATCH 331/702] Revert "CI: Avoid Python 3.11.4 for unpacking sdist" This reverts commit 3c94f8cd804b219cce006d5db8bb2e78a2c3ee69. This was not the correct way to specify a Python version, and I can't be bothered to figure out the correct way, as this issue will be fixed in build 1.0.1. 
See bug report https://github.com/pypa/build/issues/674 and fix https://github.com/pypa/build/pull/675 --- .github/workflows/stable.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 564af6ca34..18a30d6d07 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -40,8 +40,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v4 with: - # Bug in 3.11.4 tarfile extraction can break python -m build - python-version: '>=3, != 3.11.4' + python-version: 3 - run: pip install --upgrade build twine - name: Build sdist and wheel run: python -m build @@ -80,8 +79,7 @@ jobs: path: archive/ - uses: actions/setup-python@v4 with: - # Bug in 3.11.4 tarfile extraction may break sdist installation - python-version: '>=3, != 3.11.4' + python-version: 3 - name: Display Python version run: python -c "import sys; print(sys.version)" - name: Update pip From f7db5bf353ea149e26bcdbae957bbfe150860822 Mon Sep 17 00:00:00 2001 From: Blake Dewey Date: Fri, 8 Sep 2023 14:19:55 -0400 Subject: [PATCH 332/702] Fix typing in SpatialImage __init__ This corrects the typing for the `SpatialImage` `__init__` function by allowing `None` for `affine` --- nibabel/spatialimages.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 73a5fcf468..1084efe40e 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -481,7 +481,7 @@ class SpatialImage(DataobjImage): def __init__( self, dataobj: ArrayLike, - affine: np.ndarray, + affine: np.ndarray | None, header: FileBasedHeader | ty.Mapping | None = None, extra: ty.Mapping | None = None, file_map: FileMap | None = None, From 21113e348b6685f43eef302d184cf2a53f54bbb3 Mon Sep 17 00:00:00 2001 From: Blake Dewey Date: Fri, 8 Sep 2023 14:28:11 -0400 Subject: [PATCH 333/702] Try to fix mypy error based on type change --- nibabel/spatialimages.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 1084efe40e..ef34fe9466 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -566,6 +566,7 @@ def update_header(self) -> None: def _affine2header(self) -> None: """Unconditionally set affine into the header""" + assert self._affine is not None RZS = self._affine[:3, :3] vox = np.sqrt(np.sum(RZS * RZS, axis=0)) hdr = self._header From 72b6bfdfadd4cf9e01ae04da041bd8ac121a0382 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 17 Jul 2023 20:00:35 -0400 Subject: [PATCH 334/702] CI: Add 3.12 pre-release tests --- .github/workflows/pre-release.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 630f09d99b..4431c7135f 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -33,7 +33,7 @@ jobs: strategy: matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: ["3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12"] architecture: ['x64', 'x86'] install: ['pip'] check: ['test'] @@ -54,6 +54,8 @@ jobs: architecture: x86 - os: macos-latest architecture: x86 + - python-version: '3.12' + architecture: x86 env: DEPENDS: ${{ matrix.depends }} @@ -72,6 +74,7 @@ jobs: with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} + allow-prereleases: true - name: Display Python version run: python -c "import sys; print(sys.version)" - 
name: Create virtual environment
From 59b93afc3cf82e6830e292687932dd3f2110d66c Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 15 Aug 2023 09:01:06 -0400
Subject: [PATCH 335/702] TEST: Mark file:/// URL test as xfail

---
 nibabel/tests/test_image_api.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py
index 091bc57e8c..890619bad5 100644
--- a/nibabel/tests/test_image_api.py
+++ b/nibabel/tests/test_image_api.py
@@ -25,6 +25,7 @@

 import io
 import pathlib
+import sys
 import warnings
 from functools import partial
 from itertools import product
@@ -579,6 +580,10 @@ def validate_from_url(self, imaker, params):
         del img
         del rt_img

+    @pytest.mark.xfail(
+        sys.version_info >= (3, 12),
+        reason='Response type for file: urls is not a stream in Python 3.12',
+    )
     def validate_from_file_url(self, imaker, params):
         tmp_path = self.tmp_path

From 06c1e76be6c5f945d0812961e22f808e2334404e Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 15 Aug 2023 09:10:27 -0400
Subject: [PATCH 336/702] CI: Disable building dependencies from source

---
 tools/ci/install_dependencies.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/ci/install_dependencies.sh b/tools/ci/install_dependencies.sh
index f26c5204c0..2ea4a524e8 100755
--- a/tools/ci/install_dependencies.sh
+++ b/tools/ci/install_dependencies.sh
@@ -19,10 +19,10 @@ if [ -n "$EXTRA_PIP_FLAGS" ]; then
 fi

 if [ -n "$DEPENDS" ]; then
-    pip install ${EXTRA_PIP_FLAGS} --prefer-binary ${!DEPENDS}
+    pip install ${EXTRA_PIP_FLAGS} --only-binary :all: ${!DEPENDS}
     if [ -n "$OPTIONAL_DEPENDS" ]; then
         for DEP in ${!OPTIONAL_DEPENDS}; do
-            pip install ${EXTRA_PIP_FLAGS} --prefer-binary $DEP || true
+            pip install ${EXTRA_PIP_FLAGS} --only-binary :all: $DEP || true
         done
     fi
 fi

From eb39c08156bdc1ccf1ae197128eec7226696da8e Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Wed, 23 Aug 2023 10:27:01 -0400
Subject: [PATCH 337/702] FIX: Hack around 3.12rc1 bug (python/cpython#108111)

---
 nibabel/openers.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/nibabel/openers.py b/nibabel/openers.py
index 90c7774d12..9a024680a2 100644
--- a/nibabel/openers.py
+++ b/nibabel/openers.py
@@ -78,6 +78,12 @@ def __init__(
             mtime=mtime,
         )

+    def seek(self, pos: int, whence: int = 0, /) -> int:
+        # Work around bug (gh-108111) in Python 3.12rc1, where seeking without
+        # flushing can cause write of excess null bytes
+        self.flush()
+        return super().seek(pos, whence)
+

 def _gzip_open(
     filename: str,
From 2eba8dbf000155cf6666aef76ee07c7a61a2951c Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 8 Sep 2023 16:44:04 -0400
Subject: [PATCH 338/702] TEST: Use tmp_path and explicit delete to appease Windows tempdir cleanup

---
 nibabel/tests/test_openers.py | 59 ++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 29 deletions(-)

diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py
index 0d150a145c..a228e66135 100644
--- a/nibabel/tests/test_openers.py
+++ b/nibabel/tests/test_openers.py
@@ -127,35 +127,36 @@ def patch_indexed_gzip(state):
         yield

-def test_Opener_gzip_type():
-    # Test that BufferedGzipFile or IndexedGzipFile are used as appropriate
-
-    data = 'this is some test data'
-    fname = 'test.gz'
-
-    with InTemporaryDirectory():
-
-        # make some test data
-        with GzipFile(fname, mode='wb') as f:
-            f.write(data.encode())
-
-        # Each test is specified by a tuple containing:
-        # (indexed_gzip present, Opener kwargs, expected file type)
-
tests = [ - (False, {'mode': 'rb', 'keep_open': True}, GzipFile), - (False, {'mode': 'rb', 'keep_open': False}, GzipFile), - (False, {'mode': 'wb', 'keep_open': True}, GzipFile), - (False, {'mode': 'wb', 'keep_open': False}, GzipFile), - (True, {'mode': 'rb', 'keep_open': True}, MockIndexedGzipFile), - (True, {'mode': 'rb', 'keep_open': False}, MockIndexedGzipFile), - (True, {'mode': 'wb', 'keep_open': True}, GzipFile), - (True, {'mode': 'wb', 'keep_open': False}, GzipFile), - ] - - for test in tests: - igzip_present, kwargs, expected = test - with patch_indexed_gzip(igzip_present): - assert isinstance(Opener(fname, **kwargs).fobj, expected) +def test_Opener_gzip_type(tmp_path): + # Test that GzipFile or IndexedGzipFile are used as appropriate + + data = b'this is some test data' + fname = tmp_path / 'test.gz' + + # make some test data + with GzipFile(fname, mode='wb') as f: + f.write(data) + + # Each test is specified by a tuple containing: + # (indexed_gzip present, Opener kwargs, expected file type) + tests = [ + (False, {'mode': 'rb', 'keep_open': True}, GzipFile), + (False, {'mode': 'rb', 'keep_open': False}, GzipFile), + (False, {'mode': 'wb', 'keep_open': True}, GzipFile), + (False, {'mode': 'wb', 'keep_open': False}, GzipFile), + (True, {'mode': 'rb', 'keep_open': True}, MockIndexedGzipFile), + (True, {'mode': 'rb', 'keep_open': False}, MockIndexedGzipFile), + (True, {'mode': 'wb', 'keep_open': True}, GzipFile), + (True, {'mode': 'wb', 'keep_open': False}, GzipFile), + ] + + for test in tests: + igzip_present, kwargs, expected = test + with patch_indexed_gzip(igzip_present): + opener = Opener(fname, **kwargs) + assert isinstance(opener.fobj, expected) + # Explicit close to appease Windows + del opener class TestImageOpener(unittest.TestCase): From a42321f44fef53fe6e13fdaa9eaa9804fd5a05ef Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 8 Sep 2023 17:11:18 -0400 Subject: [PATCH 339/702] TEST: Use a less finicky method of creating temporary files --- nibabel/streamlines/tests/test_streamlines.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index dfb74042a3..300397b2b4 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -84,7 +84,7 @@ def setup(): ) -def test_is_supported_detect_format(): +def test_is_supported_detect_format(tmp_path): # Test is_supported and detect_format functions # Empty file/string f = BytesIO() @@ -103,7 +103,8 @@ def test_is_supported_detect_format(): # Wrong extension but right magic number for tfile_cls in FORMATS.values(): - with tempfile.TemporaryFile(mode='w+b', suffix='.txt') as f: + fpath = tmp_path / 'test.txt' + with open(fpath, 'w+b') as f: f.write(asbytes(tfile_cls.MAGIC_NUMBER)) f.seek(0, os.SEEK_SET) assert nib.streamlines.is_supported(f) @@ -111,7 +112,8 @@ def test_is_supported_detect_format(): # Good extension but wrong magic number for ext, tfile_cls in FORMATS.items(): - with tempfile.TemporaryFile(mode='w+b', suffix=ext) as f: + fpath = tmp_path / f'test{ext}' + with open(fpath, 'w+b') as f: f.write(b'pass') f.seek(0, os.SEEK_SET) assert not nib.streamlines.is_supported(f) From fcd8dd000c955033ee3add15038274c57ecb0dbe Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 12 Sep 2023 20:30:25 +0200 Subject: [PATCH 340/702] rm use of np.maximum_sctype --- nibabel/quaternions.py | 4 +++- nibabel/tests/test_scaling.py | 2 +- 2 files changed, 4 
insertions(+), 2 deletions(-) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index ec40660607..1445d2adf3 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -29,7 +29,9 @@ import numpy as np -MAX_FLOAT = np.maximum_sctype(float) +from .casting import sctypes + +MAX_FLOAT = sctypes["float"][-1] FLOAT_EPS = np.finfo(float).eps diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index e1c350b003..f441126d1d 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -188,7 +188,7 @@ def test_scaling_in_abstract(category0, category1, overflow): def check_int_a2f(in_type, out_type): # Check that array to / from file returns roughly the same as input - big_floater = np.maximum_sctype(np.float64) + big_floater = sctypes["float"][-1] info = type_info(in_type) this_min, this_max = info['min'], info['max'] if not in_type in sctypes['complex']: From edd95db4bbb92b47409a74b155f81f02ae59cb27 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 12 Sep 2023 20:30:33 +0200 Subject: [PATCH 341/702] rm unused import --- nibabel/tests/test_analyze.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index b4a3cd297b..41d11695c2 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -27,7 +27,6 @@ from ..analyze import AnalyzeHeader, AnalyzeImage from ..arraywriters import WriterError from ..casting import as_int -from ..loadsave import read_img_data from ..nifti1 import Nifti1Header from ..optpkg import optional_package from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types From 53655ec7776c76fe7760b8d428375c6734d8bb64 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 12 Sep 2023 20:35:15 +0200 Subject: [PATCH 342/702] fix quotes for blue style --- nibabel/quaternions.py | 2 +- nibabel/tests/test_scaling.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index 1445d2adf3..d2fc3ac4ca 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -31,7 +31,7 @@ from .casting import sctypes -MAX_FLOAT = sctypes["float"][-1] +MAX_FLOAT = sctypes['float'][-1] FLOAT_EPS = np.finfo(float).eps diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index f441126d1d..f667b4164d 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -188,7 +188,7 @@ def test_scaling_in_abstract(category0, category1, overflow): def check_int_a2f(in_type, out_type): # Check that array to / from file returns roughly the same as input - big_floater = sctypes["float"][-1] + big_floater = sctypes['float'][-1] info = type_info(in_type) this_min, this_max = info['min'], info['max'] if not in_type in sctypes['complex']: From e97c99209019498a92972a7d3a130937d5fe7eb9 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 12 Sep 2023 21:11:24 +0200 Subject: [PATCH 343/702] fix np.sctypeDict calls --- nibabel/casting.py | 17 ++++++++++++++++- nibabel/spatialimages.py | 3 ++- nibabel/tests/test_analyze.py | 4 ++-- nibabel/tests/test_spm99analyze.py | 4 ++-- 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 4184d69dcc..6f26f17cd9 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -45,7 +45,22 @@ class CastingError(Exception): ], 'others': [bool, object, bytes, str, np.void], } - +# fmt: off +sctypes_named = { + getattr(np, dtype) + for dtype in ( + 
'int8', 'byte', 'int16', 'short', 'int32', 'intc', 'int_', 'int64', 'longlong', + 'uint8', 'ubyte', 'uint16', 'ushort', 'uint32', 'uintc', 'uint', 'uint64', 'ulonglong', # noqa: E501 + 'float16', 'half', 'float32', 'single', 'float64', 'double', 'float96', 'float128', 'longdouble', # noqa: E501 + 'complex64', 'csingle', 'complex128', 'cdouble', 'complex192', 'complex256', 'clongdouble', # noqa: E501 + # other names of the built-in scalar types + 'int_', 'float_', 'complex_', 'bytes_', 'str_', 'bool_', 'datetime64', 'timedelta64', # noqa: E501 + # other + 'object_', 'void', + ) + if hasattr(np, dtype) +} +# fmt: on def float_to_int(arr, int_type, nan2zero=True, infmax=False): """Convert floating point array `arr` to type `int_type` diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 73a5fcf468..11853ad66f 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -139,6 +139,7 @@ import numpy as np from .arrayproxy import ArrayLike +from .casting import sctypes_named from .dataobj_images import DataobjImage from .filebasedimages import FileBasedHeader, FileBasedImage from .fileholders import FileMap @@ -333,7 +334,7 @@ def _supported_np_types(klass: type[HasDtype]) -> set[type[np.generic]]: else: raise e supported = set() - for np_type in set(np.sctypeDict.values()): + for np_type in sctypes_named: try: obj.set_data_dtype(np_type) except HeaderDataError: diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 41d11695c2..85022d78cd 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -26,7 +26,7 @@ from .. import imageglobals from ..analyze import AnalyzeHeader, AnalyzeImage from ..arraywriters import WriterError -from ..casting import as_int +from ..casting import as_int, sctypes_named from ..nifti1 import Nifti1Header from ..optpkg import optional_package from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types @@ -52,7 +52,7 @@ def add_duplicate_types(supported_np_types): # Update supported numpy types with named scalar types that map to the same set of dtypes dtypes = {np.dtype(t) for t in supported_np_types} supported_np_types.update( - scalar for scalar in set(np.sctypeDict.values()) if np.dtype(scalar) in dtypes + scalar for scalar in sctypes_named if np.dtype(scalar) in dtypes ) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index ccc1a80329..24e4a340f5 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -23,7 +23,7 @@ # files needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available') -from ..casting import shared_range, type_info +from ..casting import sctypes_named, shared_range, type_info from ..spatialimages import HeaderDataError from ..spm99analyze import HeaderTypeError, Spm99AnalyzeHeader, Spm99AnalyzeImage from ..testing import ( @@ -39,7 +39,7 @@ # For testing, we want all concrete classes of a type # Key on kind, rather than abstract base classes, since timedelta64 is a signedinteger sctypes = {} -for sctype in set(np.sctypeDict.values()): +for sctype in sctypes_named: sctypes.setdefault(np.dtype(sctype).kind, []).append(sctype) # Sort types to ensure that xdist doesn't complain about test order when we parametrize From 65106d9d0023076533923cd0f36a4ef52a25e24d Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 12 Sep 2023 22:13:19 +0200 Subject: [PATCH 344/702] better var name --- nibabel/casting.py | 2 +- nibabel/spatialimages.py | 4 ++-- 
nibabel/tests/test_analyze.py | 4 ++-- nibabel/tests/test_spm99analyze.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 6f26f17cd9..e56722676a 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -46,7 +46,7 @@ class CastingError(Exception): 'others': [bool, object, bytes, str, np.void], } # fmt: off -sctypes_named = { +sctypes_aliases = { getattr(np, dtype) for dtype in ( 'int8', 'byte', 'int16', 'short', 'int32', 'intc', 'int_', 'int64', 'longlong', diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 11853ad66f..f8bfd9ec05 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -139,7 +139,7 @@ import numpy as np from .arrayproxy import ArrayLike -from .casting import sctypes_named +from .casting import sctypes_aliases from .dataobj_images import DataobjImage from .filebasedimages import FileBasedHeader, FileBasedImage from .fileholders import FileMap @@ -334,7 +334,7 @@ def _supported_np_types(klass: type[HasDtype]) -> set[type[np.generic]]: else: raise e supported = set() - for np_type in sctypes_named: + for np_type in sctypes_aliases: try: obj.set_data_dtype(np_type) except HeaderDataError: diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 85022d78cd..75c64d4e53 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -26,7 +26,7 @@ from .. import imageglobals from ..analyze import AnalyzeHeader, AnalyzeImage from ..arraywriters import WriterError -from ..casting import as_int, sctypes_named +from ..casting import as_int, sctypes_aliases from ..nifti1 import Nifti1Header from ..optpkg import optional_package from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types @@ -52,7 +52,7 @@ def add_duplicate_types(supported_np_types): # Update supported numpy types with named scalar types that map to the same set of dtypes dtypes = {np.dtype(t) for t in supported_np_types} supported_np_types.update( - scalar for scalar in sctypes_named if np.dtype(scalar) in dtypes + scalar for scalar in sctypes_aliases if np.dtype(scalar) in dtypes ) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index 24e4a340f5..f65855ce4b 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -23,7 +23,7 @@ # files needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available') -from ..casting import sctypes_named, shared_range, type_info +from ..casting import sctypes_aliases, shared_range, type_info from ..spatialimages import HeaderDataError from ..spm99analyze import HeaderTypeError, Spm99AnalyzeHeader, Spm99AnalyzeImage from ..testing import ( @@ -39,7 +39,7 @@ # For testing, we want all concrete classes of a type # Key on kind, rather than abstract base classes, since timedelta64 is a signedinteger sctypes = {} -for sctype in sctypes_named: +for sctype in sctypes_aliases: sctypes.setdefault(np.dtype(sctype).kind, []).append(sctype) # Sort types to ensure that xdist doesn't complain about test order when we parametrize From 363b40316c6a8aa67f40d1a4669896583b9188cf Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Tue, 12 Sep 2023 22:17:06 +0200 Subject: [PATCH 345/702] rm unused imports --- nibabel/tests/test_arrayproxy.py | 1 - nibabel/tests/test_init.py | 4 ++-- nibabel/tests/test_pkg_info.py | 1 - nibabel/tests/test_spatialimages.py | 3 --- 4 files changed, 2 insertions(+), 7 deletions(-) diff --git a/nibabel/tests/test_arrayproxy.py 
b/nibabel/tests/test_arrayproxy.py index 7558c55ea5..e50caa54c9 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -12,7 +12,6 @@ import contextlib import gzip import pickle -import warnings from io import BytesIO from unittest import mock diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index 877c045f6e..2317a6397e 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -4,9 +4,9 @@ import pytest try: - from importlib.resources import as_file, files + from importlib.resources import files except ImportError: - from importlib_resources import as_file, files + from importlib_resources import files import nibabel as nib diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index 0d8146fdb0..dfe18c975a 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -2,7 +2,6 @@ """ import pytest -from packaging.version import Version import nibabel as nib from nibabel.pkg_info import cmp_pkg_version diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 95d3a2a151..aacff74b7b 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -9,7 +9,6 @@ """Testing spatialimages """ -import warnings from io import BytesIO import numpy as np @@ -21,10 +20,8 @@ from ..spatialimages import HeaderDataError, SpatialHeader, SpatialImage from ..testing import ( bytesio_round_trip, - clear_and_catch_warnings, expires, memmap_after_ufunc, - suppress_warnings, ) from ..tmpdirs import InTemporaryDirectory From 916bff9a397cd441c80f1b4fcb912bef767a5d39 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 7 Sep 2023 12:15:41 -0400 Subject: [PATCH 346/702] ENH: Add pointset data structures [BIAP9] --- nibabel/pointset.py | 128 +++++++++++++++++++++++++ nibabel/tests/test_pointset.py | 166 +++++++++++++++++++++++++++++++++ 2 files changed, 294 insertions(+) create mode 100644 nibabel/pointset.py create mode 100644 nibabel/tests/test_pointset.py diff --git a/nibabel/pointset.py b/nibabel/pointset.py new file mode 100644 index 0000000000..6c25237510 --- /dev/null +++ b/nibabel/pointset.py @@ -0,0 +1,128 @@ +import operator as op +from functools import reduce + +import numpy as np + +from nibabel.affines import apply_affine + + +class Pointset: + def __init__(self, coords): + self._coords = coords + + @property + def n_coords(self): + """Number of coordinates + + Subclasses should override with more efficient implementations. + """ + return self.get_coords().shape[0] + + def get_coords(self, name=None): + """Nx3 array of coordinates in RAS+ space""" + return self._coords + + +class TriangularMesh(Pointset): + def __init__(self, mesh): + if isinstance(mesh, tuple) and len(mesh) == 2: + coords, self._triangles = mesh + elif hasattr(mesh, 'coords') and hasattr(mesh, 'triangles'): + coords = mesh.coords + self._triangles = mesh.triangles + elif hasattr(mesh, 'get_mesh'): + coords, self._triangles = mesh.get_mesh() + else: + raise ValueError('Cannot interpret input as triangular mesh') + super().__init__(coords) + + @property + def n_triangles(self): + """Number of faces + + Subclasses should override with more efficient implementations. 
+ """ + return self._triangles.shape[0] + + def get_triangles(self): + """Mx3 array of indices into coordinate table""" + return self._triangles + + def get_mesh(self, name=None): + return self.get_coords(name=name), self.get_triangles() + + def get_names(self): + """List of surface names that can be passed to + ``get_{coords,triangles,mesh}`` + """ + raise NotImplementedError + + ## This method is called for by the BIAP, but it now seems simpler to wait to + ## provide it until there are any proposed implementations + # def decimate(self, *, n_coords=None, ratio=None): + # """ Return a TriangularMesh with a smaller number of vertices that + # preserves the geometry of the original """ + # # To be overridden when a format provides optimization opportunities + # raise NotImplementedError + + +class TriMeshFamily(TriangularMesh): + def __init__(self, mapping, default=None): + self._triangles = None + self._coords = {} + for name, mesh in dict(mapping).items(): + coords, triangles = TriangularMesh(mesh).get_mesh() + if self._triangles is None: + self._triangles = triangles + self._coords[name] = coords + + if default is None: + default = next(iter(self._coords)) + self._default = default + + def get_names(self): + return list(self._coords) + + def get_coords(self, name=None): + if name is None: + name = self._default + return self._coords[name] + + +class NdGrid(Pointset): + """ + Attributes + ---------- + shape : 3-tuple + number of coordinates in each dimension of grid + """ + + def __init__(self, shape, affines): + self.shape = tuple(shape) + try: + self._affines = dict(affines) + except (TypeError, ValueError): + self._affines = {'world': np.array(affines)} + if 'voxels' not in self._affines: + self._affines['voxels'] = np.eye(4, dtype=np.uint8) + + def get_affine(self, name=None): + """4x4 array""" + if name is None: + name = next(iter(self._affines)) + return self._affines[name] + + def get_coords(self, name=None): + if name is None: + name = next(iter(self._affines)) + aff = self.get_affine(name) + dt = np.result_type(*(np.min_scalar_type(dim) for dim in self.shape)) + # This is pretty wasteful; we almost certainly want instead an + # object that will retrieve a coordinate when indexed, but where + # np.array(obj) returns this + ijk_coords = np.array(list(np.ndindex(self.shape)), dtype=dt) + return apply_affine(aff, ijk_coords) + + @property + def n_coords(self): + return reduce(op.mul, self.shape) diff --git a/nibabel/tests/test_pointset.py b/nibabel/tests/test_pointset.py new file mode 100644 index 0000000000..efea8bbd7a --- /dev/null +++ b/nibabel/tests/test_pointset.py @@ -0,0 +1,166 @@ +from pathlib import Path +from unittest import skipUnless + +import numpy as np + +from nibabel import pointset as ps +from nibabel.arrayproxy import ArrayProxy +from nibabel.onetime import auto_attr +from nibabel.optpkg import optional_package +from nibabel.tests.nibabel_data import get_nibabel_data + +h5, has_h5py, _ = optional_package('h5py') + +FS_DATA = Path(get_nibabel_data()) / 'nitest-freesurfer' + + +class H5ArrayProxy: + def __init__(self, file_like, dataset_name): + self.file_like = file_like + self.dataset_name = dataset_name + with h5.File(file_like, 'r') as h5f: + arr = h5f[dataset_name] + self._shape = arr.shape + self._dtype = arr.dtype + + @property + def is_proxy(self): + return True + + @property + def shape(self): + return self._shape + + @property + def ndim(self): + return len(self.shape) + + @property + def dtype(self): + return self._dtype + + def __array__(self, 
dtype=None):
+        with h5.File(self.file_like, 'r') as h5f:
+            return np.asanyarray(h5f[self.dataset_name], dtype)
+
+    def __getitem__(self, slicer):
+        with h5.File(self.file_like, 'r') as h5f:
+            return h5f[self.dataset_name][slicer]
+
+
+class H5Geometry(ps.TriMeshFamily):
+    """Simple Geometry file structure that combines a single topology
+    with one or more coordinate sets
+    """
+
+    @classmethod
+    def from_filename(klass, pathlike):
+        meshes = {}
+        with h5.File(pathlike, 'r') as h5f:
+            triangles = H5ArrayProxy(pathlike, '/topology')
+            for name in h5f['coordinates']:
+                meshes[name] = (H5ArrayProxy(pathlike, f'/coordinates/{name}'), triangles)
+        return klass(meshes)
+
+    def to_filename(self, pathlike):
+        with h5.File(pathlike, 'w') as h5f:
+            h5f.create_dataset('/topology', data=self.get_triangles())
+            for name, coord in self._coords.items():
+                h5f.create_dataset(f'/coordinates/{name}', data=coord)
+
+
+class FSGeometryProxy:
+    def __init__(self, pathlike):
+        self._file_like = str(Path(pathlike))
+        self._offset = None
+        self._vnum = None
+        self._fnum = None
+
+    def _peek(self):
+        from nibabel.freesurfer.io import _fread3
+
+        with open(self._file_like, 'rb') as fobj:
+            magic = _fread3(fobj)
+            if magic != 16777214:
+                raise NotImplementedError('Triangle files only!')
+            fobj.readline()
+            fobj.readline()
+            self._vnum = np.fromfile(fobj, '>i4', 1)[0]
+            self._fnum = np.fromfile(fobj, '>i4', 1)[0]
+            self._offset = fobj.tell()
+
+    @property
+    def vnum(self):
+        if self._vnum is None:
+            self._peek()
+        return self._vnum
+
+    @property
+    def fnum(self):
+        if self._fnum is None:
+            self._peek()
+        return self._fnum
+
+    @property
+    def offset(self):
+        if self._offset is None:
+            self._peek()
+        return self._offset
+
+    @auto_attr
+    def coords(self):
+        ap = ArrayProxy(self._file_like, ((self.vnum, 3), '>f4', self.offset))
+        ap.order = 'C'
+        return ap
+
+    @auto_attr
+    def triangles(self):
+        offset = self.offset + 12 * self.vnum
+        ap = ArrayProxy(self._file_like, ((self.fnum, 3), '>i4', offset))
+        ap.order = 'C'
+        return ap
+
+
+class FreeSurferHemisphere(ps.TriMeshFamily):
+    @classmethod
+    def from_filename(klass, pathlike):
+        path = Path(pathlike)
+        hemi, default = path.name.split('.')
+        # A list, not a tuple, so the default surface can be appended below
+        mesh_names = [
+            'orig',
+            'white',
+            'smoothwm',
+            'pial',
+            'inflated',
+            'sphere',
+            'midthickness',
+            'graymid',
+        ]  # Often created
+        if default not in mesh_names:
+            mesh_names.append(default)
+        meshes = {}
+        for mesh in mesh_names:
+            fpath = path.parent / f'{hemi}.{mesh}'
+            if fpath.exists():
+                meshes[mesh] = FSGeometryProxy(fpath)
+        hemi = klass(meshes)
+        hemi._default = default
+        return hemi
+
+
+def test_FreeSurferHemisphere():
+    lh = FreeSurferHemisphere.from_filename(FS_DATA / 'fsaverage/surf/lh.white')
+    assert lh.n_coords == 163842
+    assert lh.n_triangles == 327680
+
+
+@skipUnless(has_h5py, reason='Test requires h5py')
+def test_make_H5Geometry(tmp_path):
+    lh = FreeSurferHemisphere.from_filename(FS_DATA / 'fsaverage/surf/lh.white')
+    h5geo = H5Geometry({name: lh.get_mesh(name) for name in ('white', 'pial')})
+    h5geo.to_filename(tmp_path / 'geometry.h5')
+
+    rt_h5geo = H5Geometry.from_filename(tmp_path / 'geometry.h5')
+    assert set(h5geo._coords) == set(rt_h5geo._coords)
+    assert np.array_equal(lh.get_coords('white'), rt_h5geo.get_coords('white'))
+    assert np.array_equal(lh.get_triangles(), rt_h5geo.get_triangles())
From 5dceb6490bea99b5d0847459a8840bf58f8c6df9 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Thu, 7 Sep 2023 20:56:35 -0400
Subject: [PATCH 347/702] Update nibabel/pointset.py

Co-authored-by: Oscar
Esteban --- nibabel/pointset.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/nibabel/pointset.py b/nibabel/pointset.py index 6c25237510..91b7531404 100644 --- a/nibabel/pointset.py +++ b/nibabel/pointset.py @@ -19,7 +19,15 @@ def n_coords(self): return self.get_coords().shape[0] def get_coords(self, name=None): - """Nx3 array of coordinates in RAS+ space""" + """Nx3 array of coordinates. + + Parameters + ---------- + name : :obj:`str` + Select a particular coordinate system if more than one may exist. + By default, `None` is equivalent to `"world"` and corresponds to + an RAS+ coordinate system. + """ return self._coords From 2b765df29e6338968b49807c99093d8b7280ded4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 18 Sep 2023 15:08:17 -0400 Subject: [PATCH 348/702] MNT: Update pre-commit hooks --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1fc7efd0b9..137aa49462 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ exclude: ".*/data/.*" repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.4.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer @@ -21,12 +21,12 @@ repos: hooks: - id: isort - repo: https://github.com/pycqa/flake8 - rev: 6.0.0 + rev: 6.1.0 hooks: - id: flake8 exclude: "^(doc|nisext|tools)/" - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.991 + rev: v1.5.1 hooks: - id: mypy # Sync with project.optional-dependencies.typing From 3f9b623f448305ce6c79c521201978dfbd315f19 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 18 Sep 2023 15:09:04 -0400 Subject: [PATCH 349/702] RF: Recast Pointset as a dataclass with associated affines --- nibabel/pointset.py | 226 +++++++++++++++++++++++++++++++++----------- 1 file changed, 173 insertions(+), 53 deletions(-) diff --git a/nibabel/pointset.py b/nibabel/pointset.py index 91b7531404..c131b81314 100644 --- a/nibabel/pointset.py +++ b/nibabel/pointset.py @@ -1,34 +1,151 @@ -import operator as op -from functools import reduce +"""Point-set structures + +Imaging data are sampled at points in space, and these points +can be described by coordinates. +These structures are designed to enable operations on sets of +points, as opposed to the data sampled at those points. + +Abstractly, a point set is any collection of points, but there are +two types that warrant special consideration in the neuroimaging +context: grids and meshes. + +A *grid* is a collection of regularly-spaced points. The canonical +examples of grids are the indices of voxels and their affine +projection into a reference space. + +A *mesh* is a collection of points and some structure that enables +adjacent points to be identified. A *triangular mesh* in particular +uses triplets of adjacent vertices to describe faces. +""" +from __future__ import annotations + +import math +import typing as ty +from dataclasses import dataclass, replace import numpy as np -from nibabel.affines import apply_affine +from nibabel.casting import able_int_type +from nibabel.fileslice import strided_scalar +from nibabel.spatialimages import SpatialImage + +if ty.TYPE_CHECKING: # pragma: no cover + from typing_extensions import Self + _DType = ty.TypeVar('_DType', bound=np.dtype[ty.Any]) + +class CoordinateArray(ty.Protocol): + ndim: int + shape: tuple[int, int] + + @ty.overload + def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: + ... 
# pragma: no cover
+
+    @ty.overload
+    def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]:
+        ...  # pragma: no cover
+
+
+@dataclass
 class Pointset:
-    def __init__(self, coords):
-        self._coords = coords
+    """A collection of points described by coordinates.
+
+    Parameters
+    ----------
+    coordinates : array-like
+        2-dimensional array with coordinates as rows
+    affine : :class:`numpy.ndarray`
+        Affine transform to be applied to coordinates array
+    homogeneous : :class:`bool`
+        Indicate whether the provided coordinates are homogeneous,
+        i.e., homogeneous 3D coordinates have the form ``(x, y, z, 1)``
+    """
+
+    coordinates: CoordinateArray
+    affine: np.ndarray
+    homogeneous: bool = False
+
+    ndim = 2
+    __array_priority__ = 99
+
+    def __init__(
+        self,
+        coordinates: CoordinateArray,
+        affine: np.ndarray | None = None,
+        homogeneous: bool = False,
+    ):
+        self.coordinates = coordinates
+        self.homogeneous = homogeneous
+
+        if affine is None:
+            self.affine = np.eye(self.dim + 1)
+        else:
+            self.affine = np.asanyarray(affine)
+
+        if self.affine.shape != (self.dim + 1,) * 2:
+            raise ValueError(f'Invalid affine for {self.dim}D coordinates:\n{self.affine}')
+        if np.any(self.affine[-1, :-1] != 0) or self.affine[-1, -1] != 1:
+            raise ValueError(f'Invalid affine matrix:\n{self.affine}')
+
+    @property
+    def shape(self) -> tuple[int, int]:
+        """The shape of the coordinate array"""
+        return self.coordinates.shape

     @property
-    def n_coords(self):
+    def n_coords(self) -> int:
         """Number of coordinates

         Subclasses should override with more efficient implementations.
         """
-        return self.get_coords().shape[0]
+        return self.coordinates.shape[0]
+
+    @property
+    def dim(self) -> int:
+        """The dimensionality of the space the coordinates are in"""
+        return self.coordinates.shape[1] - self.homogeneous
+
+    def __rmatmul__(self, affine: np.ndarray) -> Self:
+        """Apply an affine transformation to the pointset
+
+        This will return a new pointset with an updated affine matrix only.
+        """
+        return replace(self, affine=np.asanyarray(affine) @ self.affine)
+
+    def _homogeneous_coords(self):
+        if self.homogeneous:
+            return np.asanyarray(self.coordinates)
+
+        ones = strided_scalar(
+            shape=(self.coordinates.shape[0], 1),
+            scalar=np.array(1, dtype=self.coordinates.dtype),
+        )
+        return np.hstack((self.coordinates, ones))
+
+    def get_coords(self, *, as_homogeneous: bool = False):
+        """Retrieve the coordinates

-    def get_coords(self, name=None):
-        """Nx3 array of coordinates.
-
         Parameters
         ----------
-        name : :obj:`str`
+        as_homogeneous : :class:`bool`
+            Return homogeneous coordinates if ``True``, or Cartesian
+            coordinates if ``False``.
+
+        name : :class:`str`
             Select a particular coordinate system if more than one may exist.
             By default, `None` is equivalent to `"world"` and corresponds to
             an RAS+ coordinate system.
""" - return self._coords + ident = np.allclose(self.affine, np.eye(self.affine.shape[0])) + if self.homogeneous == as_homogeneous and ident: + return np.asanyarray(self.coordinates) + coords = self._homogeneous_coords() + if not ident: + coords = (self.affine @ coords.T).T + if not as_homogeneous: + coords = coords[:, :-1] + return coords class TriangularMesh(Pointset): @@ -65,14 +182,6 @@ def get_names(self): """ raise NotImplementedError - ## This method is called for by the BIAP, but it now seems simpler to wait to - ## provide it until there are any proposed implementations - # def decimate(self, *, n_coords=None, ratio=None): - # """ Return a TriangularMesh with a smaller number of vertices that - # preserves the geometry of the original """ - # # To be overridden when a format provides optimization opportunities - # raise NotImplementedError - class TriMeshFamily(TriangularMesh): def __init__(self, mapping, default=None): @@ -97,40 +206,51 @@ def get_coords(self, name=None): return self._coords[name] -class NdGrid(Pointset): - """ - Attributes - ---------- - shape : 3-tuple - number of coordinates in each dimension of grid +class Grid(Pointset): + r"""A regularly-spaced collection of coordinates + + This class provides factory methods for generating Pointsets from + :class:`~nibabel.spatialimages.SpatialImage`\s and generating masks + from coordinate sets. """ - def __init__(self, shape, affines): - self.shape = tuple(shape) - try: - self._affines = dict(affines) - except (TypeError, ValueError): - self._affines = {'world': np.array(affines)} - if 'voxels' not in self._affines: - self._affines['voxels'] = np.eye(4, dtype=np.uint8) - - def get_affine(self, name=None): - """4x4 array""" - if name is None: - name = next(iter(self._affines)) - return self._affines[name] + @classmethod + def from_image(cls, spatialimage: SpatialImage) -> Self: + return cls(coordinates=GridIndices(spatialimage.shape[:3]), affine=spatialimage.affine) - def get_coords(self, name=None): - if name is None: - name = next(iter(self._affines)) - aff = self.get_affine(name) - dt = np.result_type(*(np.min_scalar_type(dim) for dim in self.shape)) - # This is pretty wasteful; we almost certainly want instead an - # object that will retrieve a coordinate when indexed, but where - # np.array(obj) returns this - ijk_coords = np.array(list(np.ndindex(self.shape)), dtype=dt) - return apply_affine(aff, ijk_coords) + @classmethod + def from_mask(cls, mask: SpatialImage) -> Self: + mask_arr = np.bool_(mask.dataobj) + return cls( + coordinates=np.c_[np.nonzero(mask_arr)].astype(able_int_type(mask.shape)), + affine=mask.affine, + ) - @property - def n_coords(self): - return reduce(op.mul, self.shape) + def to_mask(self, shape=None) -> SpatialImage: + if shape is None: + shape = tuple(np.max(self.coordinates, axis=1)[: self.dim]) + mask_arr = np.zeros(shape, dtype='bool') + mask_arr[np.asanyarray(self.coordinates)[:, : self.dim]] = True + return SpatialImage(mask_arr, self.affine) + + +class GridIndices: + """Class for generating indices just-in-time""" + + __slots__ = ('gridshape', 'dtype', 'shape') + ndim = 2 + + def __init__(self, shape, dtype=None): + self.gridshape = shape + self.dtype = dtype or able_int_type(shape) + self.shape = (math.prod(self.gridshape), len(self.gridshape)) + + def __repr__(self): + return f'<{self.__class__.__name__}{self.gridshape}>' + + def __array__(self, dtype=None): + if dtype is None: + dtype = self.dtype + + axes = [np.arange(s, dtype=dtype) for s in self.gridshape] + return 
np.reshape(np.meshgrid(*axes, copy=False, indexing='ij'), (len(axes), -1)).T From f19ef3348b5a3a1dfe48eac59ec5f8e4d9ff0054 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 18 Sep 2023 15:37:20 -0400 Subject: [PATCH 350/702] TEST: Test Pointset and GridIndices classes --- nibabel/tests/test_pointset.py | 122 +++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) diff --git a/nibabel/tests/test_pointset.py b/nibabel/tests/test_pointset.py index efea8bbd7a..88001b401c 100644 --- a/nibabel/tests/test_pointset.py +++ b/nibabel/tests/test_pointset.py @@ -2,8 +2,10 @@ from unittest import skipUnless import numpy as np +import pytest from nibabel import pointset as ps +from nibabel.affines import apply_affine from nibabel.arrayproxy import ArrayProxy from nibabel.onetime import auto_attr from nibabel.optpkg import optional_package @@ -14,6 +16,126 @@ FS_DATA = Path(get_nibabel_data()) / 'nitest-freesurfer' +class TestPointsets: + rng = np.random.default_rng() + + @pytest.mark.parametrize('shape', [(5, 2), (5, 3), (5, 4)]) + @pytest.mark.parametrize('homogeneous', [True, False]) + def test_init(self, shape, homogeneous): + coords = self.rng.random(shape) + + if homogeneous: + coords = np.column_stack([coords, np.ones(shape[0])]) + + expected_shape = (shape[0], shape[1] + homogeneous) + + points = ps.Pointset(coords, homogeneous=homogeneous) + assert points.shape == expected_shape + assert np.allclose(points.affine, np.eye(shape[1] + 1)) + assert points.homogeneous is homogeneous + assert points.ndim == 2 + assert points.n_coords == shape[0] + assert points.dim == shape[1] + + points = ps.Pointset(coords, affine=np.diag([2] * shape[1] + [1]), homogeneous=homogeneous) + assert points.shape == expected_shape + assert np.allclose(points.affine, np.diag([2] * shape[1] + [1])) + assert points.homogeneous is homogeneous + assert points.ndim == 2 + assert points.n_coords == shape[0] + assert points.dim == shape[1] + + # Badly shaped affine + with pytest.raises(ValueError): + ps.Pointset(coords, affine=[0, 1]) + + # Badly valued affine + with pytest.raises(ValueError): + ps.Pointset(coords, affine=np.ones((shape[1] + 1, shape[1] + 1))) + + @pytest.mark.parametrize('shape', [(5, 2), (5, 3), (5, 4)]) + @pytest.mark.parametrize('homogeneous', [True, False]) + def test_affines(self, shape, homogeneous): + orig_coords = coords = self.rng.random(shape) + + if homogeneous: + coords = np.column_stack([coords, np.ones(shape[0])]) + + points = ps.Pointset(coords, homogeneous=homogeneous) + assert np.allclose(points.get_coords(), orig_coords) + + # Apply affines + scaler = np.diag([2] * shape[1] + [1]) + scaled = scaler @ points + assert np.array_equal(scaled.coordinates, points.coordinates) + assert np.array_equal(scaled.affine, scaler) + assert np.allclose(scaled.get_coords(), 2 * orig_coords) + + flipper = np.eye(shape[1] + 1) + # [[1, 0, 0], [0, 1, 0], [0, 0, 1]] becomes [[0, 1, 0], [1, 0, 0], [0, 0, 1]] + flipper[:-1] = flipper[-2::-1] + flipped = flipper @ points + assert np.array_equal(flipped.coordinates, points.coordinates) + assert np.array_equal(flipped.affine, flipper) + assert np.allclose(flipped.get_coords(), orig_coords[:, ::-1]) + + # Concatenate affines, with any associativity + for doubledup in [(scaler @ flipper) @ points, scaler @ (flipper @ points)]: + assert np.array_equal(doubledup.coordinates, points.coordinates) + assert np.allclose(doubledup.affine, scaler @ flipper) + assert np.allclose(doubledup.get_coords(), 2 * orig_coords[:, ::-1]) + + def 
test_homogeneous_coordinates(self):
+        ccoords = self.rng.random((5, 3))
+        hcoords = np.column_stack([ccoords, np.ones(5)])
+
+        cartesian = ps.Pointset(ccoords)
+        homogeneous = ps.Pointset(hcoords, homogeneous=True)
+
+        for points in (cartesian, homogeneous):
+            assert np.array_equal(points.get_coords(), ccoords)
+            assert np.array_equal(points.get_coords(as_homogeneous=True), hcoords)
+
+        affine = np.diag([2, 3, 4, 1])
+        cart2 = affine @ cartesian
+        homo2 = affine @ homogeneous
+
+        exp_c = apply_affine(affine, ccoords)
+        exp_h = (affine @ hcoords.T).T
+        for points in (cart2, homo2):
+            assert np.array_equal(points.get_coords(), exp_c)
+            assert np.array_equal(points.get_coords(as_homogeneous=True), exp_h)
+
+
+def test_GridIndices():
+    # 2D case
+    shape = (2, 3)
+    gi = ps.GridIndices(shape)
+
+    assert gi.dtype == np.dtype('u1')
+    assert gi.shape == (6, 2)
+    assert repr(gi) == '<GridIndices(2, 3)>'
+
+    gi_arr = np.asanyarray(gi)
+    assert gi_arr.dtype == np.dtype('u1')
+    assert gi_arr.shape == (6, 2)
+    # Tractable to write out
+    assert np.array_equal(gi_arr, [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
+
+    shape = (2, 3, 4)
+    gi = ps.GridIndices(shape)
+
+    assert gi.dtype == np.dtype('u1')
+    assert gi.shape == (24, 3)
+    assert repr(gi) == '<GridIndices(2, 3, 4)>'
+
+    gi_arr = np.asanyarray(gi)
+    assert gi_arr.dtype == np.dtype('u1')
+    assert gi_arr.shape == (24, 3)
+    # Separate implementation
+    assert np.array_equal(gi_arr, np.mgrid[:2, :3, :4].reshape(3, -1).T)
+
+
 class H5ArrayProxy:
     def __init__(self, file_like, dataset_name):
         self.file_like = file_like

From 1e246154a4f72224a626b5dcce5f244dc4034c1f Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Mon, 18 Sep 2023 16:34:10 -0400
Subject: [PATCH 351/702] TEST: Test Grid methods

---
 nibabel/tests/test_pointset.py | 54 ++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/nibabel/tests/test_pointset.py b/nibabel/tests/test_pointset.py
index 88001b401c..35d47428e7 100644
--- a/nibabel/tests/test_pointset.py
+++ b/nibabel/tests/test_pointset.py
@@ -1,3 +1,4 @@
+from math import prod
 from pathlib import Path
 from unittest import skipUnless
 
@@ -7,8 +8,10 @@
 from nibabel import pointset as ps
 from nibabel.affines import apply_affine
 from nibabel.arrayproxy import ArrayProxy
+from nibabel.fileslice import strided_scalar
 from nibabel.onetime import auto_attr
 from nibabel.optpkg import optional_package
+from nibabel.spatialimages import SpatialImage
 from nibabel.tests.nibabel_data import get_nibabel_data
 
 h5, has_h5py, _ = optional_package('h5py')
@@ -136,6 +139,57 @@ def test_GridIndices():
     assert np.array_equal(gi_arr, np.mgrid[:2, :3, :4].reshape(3, -1).T)
 
 
+class TestGrids(TestPointsets):
+    @pytest.mark.parametrize('shape', [(5, 5, 5), (5, 5, 5, 5), (5, 5, 5, 5, 5)])
+    def test_from_image(self, shape):
+        # Check that an image generates voxel coordinates
+        affine = np.diag([2, 3, 4, 1])
+        img = SpatialImage(strided_scalar(shape), affine)
+        grid = ps.Grid.from_image(img)
+        grid_coords = grid.get_coords()
+
+        assert grid.shape == (prod(shape[:3]), 3)
+        assert np.allclose(grid.affine, affine)
+
+        assert np.allclose(grid_coords[0], [0, 0, 0])
+        # Final index is [4, 4, 4], scaled by affine
+        assert np.allclose(grid_coords[-1], [8, 12, 16])
+
+    def test_from_mask(self):
+        affine = np.diag([2, 3, 4, 1])
+        mask = np.zeros((3, 3, 3))
+        mask[1, 1, 1] = 1
+        img = SpatialImage(mask, affine)
+
+        grid = ps.Grid.from_mask(img)
+        grid_coords = grid.get_coords()
+
+        assert grid.shape == (1, 3)
+        assert np.array_equal(grid_coords, [[2, 3, 4]])
+
+    def test_to_mask(self):
+        coords = np.array([[1, 1, 1]])
+
+        grid = ps.Grid(coords)
+
+        mask_img = grid.to_mask()
+        assert mask_img.shape == (2, 2, 2)
+        assert np.array_equal(mask_img.get_fdata(), [[[0, 0], [0, 0]], [[0, 0], [0, 1]]])
+        assert np.array_equal(mask_img.affine, np.eye(4))
+
+        mask_img = grid.to_mask(shape=(3, 3, 3))
+        assert mask_img.shape == (3, 3, 3)
+        assert np.array_equal(
+            mask_img.get_fdata(),
+            [
+                [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
+                [[0, 0, 0], [0, 1, 0], [0, 0, 0]],
+                [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
+            ],
+        )
+        assert np.array_equal(mask_img.affine, np.eye(4))
+
+
 class H5ArrayProxy:
     def __init__(self, file_like, dataset_name):
         self.file_like = file_like
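A minimal sketch of the API these tests pin down, for readers following along (not part of the patch series; it assumes the `Pointset` and `GridIndices` classes exactly as introduced in the preceding patches, and the variable names are illustrative only):

    import numpy as np

    from nibabel import pointset as ps

    # Affines compose lazily: matrix multiplication updates the transform,
    # leaving the stored coordinates untouched until get_coords() is called
    points = ps.Pointset(np.random.default_rng().random((5, 3)))
    scaled = np.diag([2, 2, 2, 1]) @ points
    assert np.allclose(scaled.get_coords(), 2 * points.get_coords())

    # GridIndices defers materializing voxel indices until array coercion
    gi = ps.GridIndices((2, 3))
    print(repr(gi))           # <GridIndices(2, 3)>
    print(np.asanyarray(gi))  # the (6, 2) array [[0 0] [0 1] ... [1 2]]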
From 8ac45d061847a3305fdfba044c01f2d008de6cb2 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 19 Sep 2023 09:37:01 -0400
Subject: [PATCH 352/702] Apply suggestions from code review

Co-authored-by: Oscar Esteban

---
 nibabel/pointset.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nibabel/pointset.py b/nibabel/pointset.py
index c131b81314..cdb08c8cce 100644
--- a/nibabel/pointset.py
+++ b/nibabel/pointset.py
@@ -55,7 +55,7 @@ class Pointset:
     Parameters
     ----------
     coords : array-like
-        2-dimensional array with coordinates as rows
+        (*N*, *n*) array with *N* being points and columns their *n*-dimensional coordinates
     affine : :class:`numpy.ndarray`
         Affine transform to be applied to coordinates array
     homogeneous : :class:`bool`

From f55c2868670676c35d7bc66a5fca6ea34c5057aa Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 19 Sep 2023 12:57:37 -0400
Subject: [PATCH 353/702] RF: Drop ndim/shape attributes, make explicit comment on __array_priority__

---
 nibabel/pointset.py            |  8 ++------
 nibabel/tests/test_pointset.py | 18 ++++++------------
 2 files changed, 8 insertions(+), 18 deletions(-)

diff --git a/nibabel/pointset.py b/nibabel/pointset.py
index cdb08c8cce..162466d90b 100644
--- a/nibabel/pointset.py
+++ b/nibabel/pointset.py
@@ -66,7 +66,8 @@ class Pointset:
     coordinates: CoordinateArray
     affine: np.ndarray
     homogeneous: bool = False
-    ndim = 2
+
+    # Force use of __rmatmul__ with numpy arrays
     __array_priority__ = 99
 
     def __init__(
@@ -88,11 +89,6 @@ def __init__(
         if np.any(self.affine[-1, :-1] != 0) or self.affine[-1, -1] != 1:
             raise ValueError(f'Invalid affine matrix:\n{self.affine}')
 
-    @property
-    def shape(self) -> tuple[int, int]:
-        """The shape of the coordinate array"""
-        return self.coordinates.shape
-
     @property
     def n_coords(self) -> int:
         """Number of coordinates
diff --git a/nibabel/tests/test_pointset.py b/nibabel/tests/test_pointset.py
index 35d47428e7..49c51251c9 100644
--- a/nibabel/tests/test_pointset.py
+++ b/nibabel/tests/test_pointset.py
@@ -30,23 +30,15 @@ def test_init(self, shape, homogeneous):
         if homogeneous:
             coords = np.column_stack([coords, np.ones(shape[0])])
 
-        expected_shape = (shape[0], shape[1] + homogeneous)
-
         points = ps.Pointset(coords, homogeneous=homogeneous)
-        assert points.shape == expected_shape
         assert np.allclose(points.affine, np.eye(shape[1] + 1))
         assert points.homogeneous is homogeneous
-        assert points.ndim == 2
-        assert points.n_coords == shape[0]
-        assert points.dim == shape[1]
+        assert (points.n_coords, points.dim) == shape
 
         points = ps.Pointset(coords, affine=np.diag([2] * shape[1] + [1]), homogeneous=homogeneous)
-        assert points.shape == expected_shape
         assert np.allclose(points.affine, np.diag([2] * shape[1] + [1]))
         assert points.homogeneous is homogeneous
-        assert points.ndim == 2
-        assert points.n_coords == shape[0]
-        assert points.dim == shape[1]
+        assert (points.n_coords, points.dim) == shape
 
         # Badly shaped affine
         with pytest.raises(ValueError):
@@ -148,7 +140,8 @@ def test_from_image(self, shape):
         grid = ps.Grid.from_image(img)
         grid_coords = grid.get_coords()
 
-        assert grid.shape == (prod(shape[:3]), 3)
+        assert grid.n_coords == prod(shape[:3])
+        assert grid.dim == 3
         assert np.allclose(grid.affine, affine)
 
         assert np.allclose(grid_coords[0], [0, 0, 0])
@@ -164,7 +157,8 @@ def test_from_mask(self):
         grid = ps.Grid.from_mask(img)
         grid_coords = grid.get_coords()
 
-        assert grid.shape == (1, 3)
+        assert grid.n_coords == 1
+        assert grid.dim == 3
         assert np.array_equal(grid_coords, [[2, 3, 4]])
 
     def test_to_mask(self):

From c3ba28d558086b35baf2a9009d8ae099f397dcb5 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 19 Sep 2023 12:59:47 -0400
Subject: [PATCH 354/702] FIX: to_mask() implementation

---
 nibabel/pointset.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nibabel/pointset.py b/nibabel/pointset.py
index 162466d90b..324b76d360 100644
--- a/nibabel/pointset.py
+++ b/nibabel/pointset.py
@@ -224,9 +224,9 @@ def from_mask(cls, mask: SpatialImage) -> Self:
 
     def to_mask(self, shape=None) -> SpatialImage:
         if shape is None:
-            shape = tuple(np.max(self.coordinates, axis=1)[: self.dim])
+            shape = tuple(np.max(self.coordinates, axis=0)[: self.dim] + 1)
         mask_arr = np.zeros(shape, dtype='bool')
-        mask_arr[np.asanyarray(self.coordinates)[:, : self.dim]] = True
+        mask_arr[tuple(np.asanyarray(self.coordinates)[:, : self.dim].T)] = True
         return SpatialImage(mask_arr, self.affine)
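With the `axis=0` reduction, the `+ 1` bound, and the tuple-of-arrays index in place, a mask should now survive a round trip through `Grid`. A hedged sketch of that property (not part of the patch series; the shape and voxel chosen here are arbitrary):

    import numpy as np

    from nibabel import pointset as ps
    from nibabel.spatialimages import SpatialImage

    mask = np.zeros((3, 3, 3), dtype=bool)
    mask[1, 1, 1] = True
    img = SpatialImage(mask, np.eye(4))

    # from_mask() records the nonzero voxel coordinates; to_mask() now sets
    # exactly those voxels, and the default shape covers the extreme index
    grid = ps.Grid.from_mask(img)
    rt = grid.to_mask(shape=mask.shape)
    assert np.array_equal(np.asanyarray(rt.dataobj), mask)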
From 5ded8517cb230712c8ecd70c9f0c510d2533874a Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 19 Sep 2023 14:33:27 -0400
Subject: [PATCH 355/702] RF: Drop triangular meshes for now

---
 nibabel/pointset.py            |  58 ------------
 nibabel/tests/test_pointset.py | 152 ---------------------------------
 2 files changed, 210 deletions(-)

diff --git a/nibabel/pointset.py b/nibabel/pointset.py
index 324b76d360..b40449801d 100644
--- a/nibabel/pointset.py
+++ b/nibabel/pointset.py
@@ -144,64 +144,6 @@ def get_coords(self, *, as_homogeneous: bool = False):
         return coords
 
 
-class TriangularMesh(Pointset):
-    def __init__(self, mesh):
-        if isinstance(mesh, tuple) and len(mesh) == 2:
-            coords, self._triangles = mesh
-        elif hasattr(mesh, 'coords') and hasattr(mesh, 'triangles'):
-            coords = mesh.coords
-            self._triangles = mesh.triangles
-        elif hasattr(mesh, 'get_mesh'):
-            coords, self._triangles = mesh.get_mesh()
-        else:
-            raise ValueError('Cannot interpret input as triangular mesh')
-        super().__init__(coords)
-
-    @property
-    def n_triangles(self):
-        """Number of faces
-
-        Subclasses should override with more efficient implementations.
- """ - return self._triangles.shape[0] - - def get_triangles(self): - """Mx3 array of indices into coordinate table""" - return self._triangles - - def get_mesh(self, name=None): - return self.get_coords(name=name), self.get_triangles() - - def get_names(self): - """List of surface names that can be passed to - ``get_{coords,triangles,mesh}`` - """ - raise NotImplementedError - - -class TriMeshFamily(TriangularMesh): - def __init__(self, mapping, default=None): - self._triangles = None - self._coords = {} - for name, mesh in dict(mapping).items(): - coords, triangles = TriangularMesh(mesh).get_mesh() - if self._triangles is None: - self._triangles = triangles - self._coords[name] = coords - - if default is None: - default = next(iter(self._coords)) - self._default = default - - def get_names(self): - return list(self._coords) - - def get_coords(self, name=None): - if name is None: - name = self._default - return self._coords[name] - - class Grid(Pointset): r"""A regularly-spaced collection of coordinates diff --git a/nibabel/tests/test_pointset.py b/nibabel/tests/test_pointset.py index 49c51251c9..fb9a7c5c81 100644 --- a/nibabel/tests/test_pointset.py +++ b/nibabel/tests/test_pointset.py @@ -182,155 +182,3 @@ def test_to_mask(self): ], ) assert np.array_equal(mask_img.affine, np.eye(4)) - - -class H5ArrayProxy: - def __init__(self, file_like, dataset_name): - self.file_like = file_like - self.dataset_name = dataset_name - with h5.File(file_like, 'r') as h5f: - arr = h5f[dataset_name] - self._shape = arr.shape - self._dtype = arr.dtype - - @property - def is_proxy(self): - return True - - @property - def shape(self): - return self._shape - - @property - def ndim(self): - return len(self.shape) - - @property - def dtype(self): - return self._dtype - - def __array__(self, dtype=None): - with h5.File(self.file_like, 'r') as h5f: - return np.asanyarray(h5f[self.dataset_name], dtype) - - def __getitem__(self, slicer): - with h5.File(self.file_like, 'r') as h5f: - return h5f[self.dataset_name][slicer] - - -class H5Geometry(ps.TriMeshFamily): - """Simple Geometry file structure that combines a single topology - with one or more coordinate sets - """ - - @classmethod - def from_filename(klass, pathlike): - meshes = {} - with h5.File(pathlike, 'r') as h5f: - triangles = H5ArrayProxy(pathlike, '/topology') - for name in h5f['coordinates']: - meshes[name] = (H5ArrayProxy(pathlike, f'/coordinates/{name}'), triangles) - return klass(meshes) - - def to_filename(self, pathlike): - with h5.File(pathlike, 'w') as h5f: - h5f.create_dataset('/topology', data=self.get_triangles()) - for name, coord in self._coords.items(): - h5f.create_dataset(f'/coordinates/{name}', data=coord) - - -class FSGeometryProxy: - def __init__(self, pathlike): - self._file_like = str(Path(pathlike)) - self._offset = None - self._vnum = None - self._fnum = None - - def _peek(self): - from nibabel.freesurfer.io import _fread3 - - with open(self._file_like, 'rb') as fobj: - magic = _fread3(fobj) - if magic != 16777214: - raise NotImplementedError('Triangle files only!') - fobj.readline() - fobj.readline() - self._vnum = np.fromfile(fobj, '>i4', 1)[0] - self._fnum = np.fromfile(fobj, '>i4', 1)[0] - self._offset = fobj.tell() - - @property - def vnum(self): - if self._vnum is None: - self._peek() - return self._vnum - - @property - def fnum(self): - if self._fnum is None: - self._peek() - return self._fnum - - @property - def offset(self): - if self._offset is None: - self._peek() - return self._offset - - @auto_attr - def coords(self): 
-        ap = ArrayProxy(self._file_like, ((self.vnum, 3), '>f4', self.offset))
-        ap.order = 'C'
-        return ap
-
-    @auto_attr
-    def triangles(self):
-        offset = self.offset + 12 * self.vnum
-        ap = ArrayProxy(self._file_like, ((self.fnum, 3), '>i4', offset))
-        ap.order = 'C'
-        return ap
-
-
-class FreeSurferHemisphere(ps.TriMeshFamily):
-    @classmethod
-    def from_filename(klass, pathlike):
-        path = Path(pathlike)
-        hemi, default = path.name.split('.')
-        mesh_names = (
-            'orig',
-            'white',
-            'smoothwm',
-            'pial',
-            'inflated',
-            'sphere',
-            'midthickness',
-            'graymid',
-        )  # Often created
-        if default not in mesh_names:
-            mesh_names.append(default)
-        meshes = {}
-        for mesh in mesh_names:
-            fpath = path.parent / f'{hemi}.{mesh}'
-            if fpath.exists():
-                meshes[mesh] = FSGeometryProxy(fpath)
-        hemi = klass(meshes)
-        hemi._default = default
-        return hemi
-
-
-def test_FreeSurferHemisphere():
-    lh = FreeSurferHemisphere.from_filename(FS_DATA / 'fsaverage/surf/lh.white')
-    assert lh.n_coords == 163842
-    assert lh.n_triangles == 327680
-
-
-@skipUnless(has_h5py, reason='Test requires h5py')
-def test_make_H5Geometry(tmp_path):
-    lh = FreeSurferHemisphere.from_filename(FS_DATA / 'fsaverage/surf/lh.white')
-    h5geo = H5Geometry({name: lh.get_mesh(name) for name in ('white', 'pial')})
-    h5geo.to_filename(tmp_path / 'geometry.h5')
-
-    rt_h5geo = H5Geometry.from_filename(tmp_path / 'geometry.h5')
-    assert set(h5geo._coords) == set(rt_h5geo._coords)
-    assert np.array_equal(lh.get_coords('white'), rt_h5geo.get_coords('white'))
-    assert np.array_equal(lh.get_triangles(), rt_h5geo.get_triangles())

From 7071f00e84b1728a1cdfc402fee1e2bebc50b3d4 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 19 Sep 2023 15:18:02 -0400
Subject: [PATCH 356/702] ENH: Expand CIFTI2 constants to use a synonym recoder

---
 nibabel/cifti2/cifti2.py      | 93 ++++++++++++++++++++++------------
 nibabel/cifti2/cifti2_axes.py |  8 +--
 2 files changed, 63 insertions(+), 38 deletions(-)

diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py
index b41521f0cd..9970e941f8 100644
--- a/nibabel/cifti2/cifti2.py
+++ b/nibabel/cifti2/cifti2.py
@@ -30,7 +30,7 @@
 from ..filebasedimages import FileBasedHeader, SerializableImage
 from ..nifti1 import Nifti1Extensions
 from ..nifti2 import Nifti2Header, Nifti2Image
-from ..volumeutils import make_dt_codes
+from ..volumeutils import Recoder, make_dt_codes
 
 
 def _float_01(val):
@@ -80,39 +80,64 @@ class Cifti2HeaderError(Exception):
     'RADIAN',
 )
 
-CIFTI_BRAIN_STRUCTURES = (
-    'CIFTI_STRUCTURE_ACCUMBENS_LEFT',
-    'CIFTI_STRUCTURE_ACCUMBENS_RIGHT',
-    'CIFTI_STRUCTURE_ALL_WHITE_MATTER',
-    'CIFTI_STRUCTURE_ALL_GREY_MATTER',
-    'CIFTI_STRUCTURE_AMYGDALA_LEFT',
-    'CIFTI_STRUCTURE_AMYGDALA_RIGHT',
-    'CIFTI_STRUCTURE_BRAIN_STEM',
-    'CIFTI_STRUCTURE_CAUDATE_LEFT',
-    'CIFTI_STRUCTURE_CAUDATE_RIGHT',
-    'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_LEFT',
-    'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_RIGHT',
-    'CIFTI_STRUCTURE_CEREBELLUM',
-    'CIFTI_STRUCTURE_CEREBELLUM_LEFT',
-    'CIFTI_STRUCTURE_CEREBELLUM_RIGHT',
-    'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_LEFT',
-    'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_RIGHT',
-    'CIFTI_STRUCTURE_CORTEX',
-    'CIFTI_STRUCTURE_CORTEX_LEFT',
-    'CIFTI_STRUCTURE_CORTEX_RIGHT',
-    'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT',
-    'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT',
-    'CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT',
-    'CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT',
-    'CIFTI_STRUCTURE_OTHER',
-    'CIFTI_STRUCTURE_OTHER_GREY_MATTER',
-    'CIFTI_STRUCTURE_OTHER_WHITE_MATTER',
-    'CIFTI_STRUCTURE_PALLIDUM_LEFT',
-    'CIFTI_STRUCTURE_PALLIDUM_RIGHT',
-    'CIFTI_STRUCTURE_PUTAMEN_LEFT',
-    'CIFTI_STRUCTURE_PUTAMEN_RIGHT',
-    'CIFTI_STRUCTURE_THALAMUS_LEFT',
-    'CIFTI_STRUCTURE_THALAMUS_RIGHT',
+
+def _full_structure(struct):
+    """Expands STRUCT_NAME into:
+
+    STRUCT_NAME, CIFTI_STRUCTURE_STRUCT_NAME, StructName
+    """
+    return (
+        struct,
+        f'CIFTI_STRUCTURE_{struct}',
+        ''.join(word.capitalize() for word in struct.split('_')),
+    )
+
+
+CIFTI_BRAIN_STRUCTURES = Recoder(
+    (
+        # For simplicity of comparison, use the ordering from:
+        # https://github.com/Washington-University/workbench/blob/b985f5d/src/Common/StructureEnum.cxx
+        # (name, ciftiname, guiname)
+        # ('CORTEX_LEFT', 'CIFTI_STRUCTURE_CORTEX_LEFT', 'CortexLeft')
+        _full_structure('CORTEX_LEFT'),
+        _full_structure('CORTEX_RIGHT'),
+        _full_structure('CEREBELLUM'),
+        _full_structure('ACCUMBENS_LEFT'),
+        _full_structure('ACCUMBENS_RIGHT'),
+        _full_structure('ALL'),
+        _full_structure('ALL_GREY_MATTER'),
+        _full_structure('ALL_WHITE_MATTER'),
+        _full_structure('AMYGDALA_LEFT'),
+        _full_structure('AMYGDALA_RIGHT'),
+        _full_structure('BRAIN_STEM'),
+        _full_structure('CAUDATE_LEFT'),
+        _full_structure('CAUDATE_RIGHT'),
+        _full_structure('CEREBELLAR_WHITE_MATTER_LEFT'),
+        _full_structure('CEREBELLAR_WHITE_MATTER_RIGHT'),
+        _full_structure('CEREBELLUM_LEFT'),
+        _full_structure('CEREBELLUM_RIGHT'),
+        _full_structure('CEREBRAL_WHITE_MATTER_LEFT'),
+        _full_structure('CEREBRAL_WHITE_MATTER_RIGHT'),
+        _full_structure('CORTEX'),
+        _full_structure('DIENCEPHALON_VENTRAL_LEFT'),
+        _full_structure('DIENCEPHALON_VENTRAL_RIGHT'),
+        _full_structure('HIPPOCAMPUS_LEFT'),
+        _full_structure('HIPPOCAMPUS_RIGHT'),
+        _full_structure('INVALID'),
+        _full_structure('OTHER'),
+        _full_structure('OTHER_GREY_MATTER'),
+        _full_structure('OTHER_WHITE_MATTER'),
+        _full_structure('PALLIDUM_LEFT'),
+        _full_structure('PALLIDUM_RIGHT'),
+        _full_structure('PUTAMEN_LEFT'),
+        _full_structure('PUTAMEN_RIGHT'),
+        ## Also commented out in connectome_wb; unclear if deprecated, planned, or what
+        # _full_structure("SUBCORTICAL_WHITE_MATTER_LEFT")
+        # _full_structure("SUBCORTICAL_WHITE_MATTER_RIGHT")
+        _full_structure('THALAMUS_LEFT'),
+        _full_structure('THALAMUS_RIGHT'),
+    ),
+    fields=('name', 'ciftiname', 'guiname'),
 )
diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py
index bc6069a160..6443a34fb5 100644
--- a/nibabel/cifti2/cifti2_axes.py
+++ b/nibabel/cifti2/cifti2_axes.py
@@ -520,7 +520,7 @@ def to_cifti_brain_structure_name(name):
         ValueError: raised if the input name does not match a known anatomical structure in CIFTI-2
         """
         if name in cifti2.CIFTI_BRAIN_STRUCTURES:
-            return name
+            return cifti2.CIFTI_BRAIN_STRUCTURES.ciftiname[name]
         if not isinstance(name, str):
             if len(name) == 1:
                 structure = name[0]
@@ -554,10 +554,10 @@ def to_cifti_brain_structure_name(name):
             proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}'
         else:
             proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}_{orientation.upper()}'
-        if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES:
+        if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES.ciftiname:
             raise ValueError(
-                f'{name} was interpreted as {proposed_name}, which is not '
-                'a valid CIFTI brain structure'
+                f'{name} was interpreted as {proposed_name}, '
+                'which is not a valid CIFTI brain structure'
             )
         return proposed_name
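For readers unfamiliar with `Recoder`, each declared field becomes a mapping keyed by every synonym in a row, so the plain, `CIFTI_STRUCTURE_`-prefixed, and CamelCase spellings all resolve to the same canonical entry. A small sketch of the behavior the diff above relies on (not part of the patch series):

    from nibabel.cifti2.cifti2 import CIFTI_BRAIN_STRUCTURES

    # Membership accepts any synonym ...
    assert 'CortexLeft' in CIFTI_BRAIN_STRUCTURES

    # ... and each field maps any synonym to that field's value for its row
    for name in ('CORTEX_LEFT', 'CIFTI_STRUCTURE_CORTEX_LEFT', 'CortexLeft'):
        assert CIFTI_BRAIN_STRUCTURES.ciftiname[name] == 'CIFTI_STRUCTURE_CORTEX_LEFT'
    assert CIFTI_BRAIN_STRUCTURES.guiname['CORTEX_LEFT'] == 'CortexLeft'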
From 20bffc3241085f6d94982ae0217724e25e8bfc7b Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 19 Sep 2023 20:24:55 -0400
Subject: [PATCH 357/702] Update nibabel/cifti2/cifti2.py

Co-authored-by: Mathias Goncalves

---
nibabel/cifti2/cifti2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 9970e941f8..34aed5a9ed 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -81,7 +81,7 @@ class Cifti2HeaderError(Exception): ) -def _full_structure(struct): +def _full_structure(struct: str): """Expands STRUCT_NAME into: STRUCT_NAME, CIFTI_STRUCTURE_STRUCT_NAME, StructName From 67970ac18d62e692272f4913757203f31080eada Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 22 Feb 2022 16:02:10 -0500 Subject: [PATCH 358/702] ENH: Permit XmlSerializable.to_xml() to pass kwargs to ElementTree.tostring() --- nibabel/gifti/gifti.py | 4 ++-- nibabel/xmlutils.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 56efa4ea0f..16261ee679 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -852,7 +852,7 @@ def _to_xml_element(self): GIFTI.append(dar._to_xml_element()) return GIFTI - def to_xml(self, enc='utf-8', *, mode='strict') -> bytes: + def to_xml(self, enc='utf-8', *, mode='strict', **kwargs) -> bytes: """Return XML corresponding to image content""" if mode == 'strict': if any(arr.datatype not in GIFTI_DTYPES for arr in self.darrays): @@ -882,7 +882,7 @@ def to_xml(self, enc='utf-8', *, mode='strict') -> bytes: header = b""" """ - return header + super().to_xml(enc) + return header + super().to_xml(enc, **kwargs) # Avoid the indirection of going through to_file_map def to_bytes(self, enc='utf-8', *, mode='strict'): diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 31637b5e0c..2770bc3ee9 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -21,11 +21,11 @@ def _to_xml_element(self): """Output should be a xml.etree.ElementTree.Element""" raise NotImplementedError() - def to_xml(self, enc='utf-8'): + def to_xml(self, enc='utf-8', **kwargs): """Output should be an xml string with the given encoding. (default: utf-8)""" ele = self._to_xml_element() - return '' if ele is None else tostring(ele, enc) + return '' if ele is None else tostring(ele, enc, **kwargs) class XmlBasedHeader(FileBasedHeader, XmlSerializable): From f8c940db1b9dee22d58742ae46338ed262dbfaad Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 22 Sep 2023 09:14:19 -0400 Subject: [PATCH 359/702] DOC: Improve documentation of XmlSerializable class, update fallback return type --- nibabel/xmlutils.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 2770bc3ee9..dee36a5321 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -15,17 +15,24 @@ class XmlSerializable: - """Basic interface for serializing an object to xml""" + """Basic interface for serializing an object to XML""" - def _to_xml_element(self): + def _to_xml_element(self) -> Element: """Output should be a xml.etree.ElementTree.Element""" - raise NotImplementedError() + raise NotImplementedError + + def to_xml(self, enc='utf-8', **kwargs) -> bytes: + r"""Generate an XML bytestring with a given encoding. - def to_xml(self, enc='utf-8', **kwargs): - """Output should be an xml string with the given encoding. - (default: utf-8)""" + Parameters + ---------- + enc : :class:`string` + Encoding to use for the generated bytestring. Default: 'utf-8' + \*\*kwargs : :class:`dict` + Additional keyword arguments to :func:`xml.etree.ElementTree.tostring`. 
+ """ ele = self._to_xml_element() - return '' if ele is None else tostring(ele, enc, **kwargs) + return b'' if ele is None else tostring(ele, enc, **kwargs) class XmlBasedHeader(FileBasedHeader, XmlSerializable): From cea2f6cfa1c4c101755b5b5e3d97d3735982ead6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 22 Sep 2023 09:54:28 -0400 Subject: [PATCH 360/702] MNT: Skip coverage of abstract methods --- nibabel/xmlutils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index dee36a5321..4a5fb28979 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -19,7 +19,7 @@ class XmlSerializable: def _to_xml_element(self) -> Element: """Output should be a xml.etree.ElementTree.Element""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover def to_xml(self, enc='utf-8', **kwargs) -> bytes: r"""Generate an XML bytestring with a given encoding. @@ -108,10 +108,10 @@ def parse(self, string=None, fname=None, fptr=None): parser.ParseFile(fptr) def StartElementHandler(self, name, attrs): - raise NotImplementedError + raise NotImplementedError # pragma: no cover def EndElementHandler(self, name): - raise NotImplementedError + raise NotImplementedError # pragma: no cover def CharacterDataHandler(self, data): - raise NotImplementedError + raise NotImplementedError # pragma: no cover From f1f99014b8d0af47d8afcaf972107992644d43ef Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Fri, 29 Sep 2023 10:05:55 -0400 Subject: [PATCH 361/702] Allow relative and home paths --- nibabel/filename_parser.py | 5 ++--- nibabel/tests/test_filename_parser.py | 26 +++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index 45c50d6830..b3b4f90ff2 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -10,6 +10,7 @@ from __future__ import annotations import os +import pathlib import typing as ty if ty.TYPE_CHECKING: # pragma: no cover @@ -37,9 +38,7 @@ def _stringify_path(filepath_or_buffer: FileSpec) -> str: Adapted from: https://github.com/pandas-dev/pandas/blob/325dd68/pandas/io/common.py#L131-L160 """ - if isinstance(filepath_or_buffer, os.PathLike): - return filepath_or_buffer.__fspath__() - return filepath_or_buffer + return str(pathlib.Path(filepath_or_buffer).expanduser().resolve()) def types_filenames( diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 29da7b6f61..7d2d45eb25 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -7,10 +7,11 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for filename container""" +import pathlib import pytest -from ..filename_parser import TypesFilenamesError, parse_filename, splitext_addext, types_filenames +from ..filename_parser import TypesFilenamesError, parse_filename, splitext_addext, types_filenames, _stringify_path def test_filenames(): @@ -123,3 +124,26 @@ def test_splitext_addext(): assert res == ('..', '', '') res = splitext_addext('...') assert res == ('...', '', '') + + +def test__stringify_path(): + current_directory = pathlib.Path.cwd() + res = _stringify_path('') + assert res == str(current_directory) + res = _stringify_path('fname.ext.gz') + assert res == str(current_directory / 'fname.ext.gz') + res = _stringify_path(pathlib.Path('fname.ext.gz')) + assert res == str(current_directory / 'fname.ext.gz') + + home = 
pathlib.Path.home() + res = _stringify_path(pathlib.Path('~/fname.ext.gz')) + assert res == str(home) + '/fname.ext.gz' + + res = _stringify_path(pathlib.Path('./fname.ext.gz')) + assert res == str(current_directory / 'fname.ext.gz') + res = _stringify_path(pathlib.Path('../fname.ext.gz')) + assert res == str(current_directory.parent / 'fname.ext.gz') + + + + From 891e462469d823b11426de77f92553f1fad6a0a5 Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Fri, 29 Sep 2023 10:17:12 -0400 Subject: [PATCH 362/702] Maintain relative path behavior --- nibabel/filename_parser.py | 2 +- nibabel/tests/test_filename_parser.py | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index b3b4f90ff2..71e55854eb 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -38,7 +38,7 @@ def _stringify_path(filepath_or_buffer: FileSpec) -> str: Adapted from: https://github.com/pandas-dev/pandas/blob/325dd68/pandas/io/common.py#L131-L160 """ - return str(pathlib.Path(filepath_or_buffer).expanduser().resolve()) + return str(pathlib.Path(filepath_or_buffer).expanduser()) def types_filenames( diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 7d2d45eb25..fe5249a8ab 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -128,21 +128,19 @@ def test_splitext_addext(): def test__stringify_path(): current_directory = pathlib.Path.cwd() - res = _stringify_path('') - assert res == str(current_directory) res = _stringify_path('fname.ext.gz') - assert res == str(current_directory / 'fname.ext.gz') + assert res == 'fname.ext.gz' res = _stringify_path(pathlib.Path('fname.ext.gz')) - assert res == str(current_directory / 'fname.ext.gz') + assert res == 'fname.ext.gz' home = pathlib.Path.home() res = _stringify_path(pathlib.Path('~/fname.ext.gz')) assert res == str(home) + '/fname.ext.gz' res = _stringify_path(pathlib.Path('./fname.ext.gz')) - assert res == str(current_directory / 'fname.ext.gz') + assert res == 'fname.ext.gz' res = _stringify_path(pathlib.Path('../fname.ext.gz')) - assert res == str(current_directory.parent / 'fname.ext.gz') + assert res == '../fname.ext.gz' From b70ce832a115af47bedbb661227ae9648aad5643 Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Fri, 29 Sep 2023 10:19:00 -0400 Subject: [PATCH 363/702] Push to trigger CI --- nibabel/filename_parser.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index 71e55854eb..e25ea9e1d3 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -38,7 +38,8 @@ def _stringify_path(filepath_or_buffer: FileSpec) -> str: Adapted from: https://github.com/pandas-dev/pandas/blob/325dd68/pandas/io/common.py#L131-L160 """ - return str(pathlib.Path(filepath_or_buffer).expanduser()) + full_path = pathlib.Path(filepath_or_buffer).expanduser() + return str(full_path) def types_filenames( From e6e8d40c64c8ce978d9b7201efb2b1ce8755fe53 Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Fri, 29 Sep 2023 11:05:18 -0400 Subject: [PATCH 364/702] Restore forward slash behavior --- nibabel/filename_parser.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index e25ea9e1d3..92a2f4b1f5 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -38,8 +38,7 @@ def _stringify_path(filepath_or_buffer: 
FileSpec) -> str: Adapted from: https://github.com/pandas-dev/pandas/blob/325dd68/pandas/io/common.py#L131-L160 """ - full_path = pathlib.Path(filepath_or_buffer).expanduser() - return str(full_path) + return pathlib.Path(filepath_or_buffer).expanduser().as_posix() def types_filenames( From 47f4dccdfd29150a1fedc4179d91506e92752935 Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Fri, 29 Sep 2023 11:48:47 -0400 Subject: [PATCH 365/702] Ensure posix test strings --- nibabel/tests/test_filename_parser.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index fe5249a8ab..963f7cc624 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -127,13 +127,13 @@ def test_splitext_addext(): def test__stringify_path(): - current_directory = pathlib.Path.cwd() + current_directory = pathlib.Path.cwd().as_posix() res = _stringify_path('fname.ext.gz') assert res == 'fname.ext.gz' res = _stringify_path(pathlib.Path('fname.ext.gz')) assert res == 'fname.ext.gz' - home = pathlib.Path.home() + home = pathlib.Path.home().as_posix() res = _stringify_path(pathlib.Path('~/fname.ext.gz')) assert res == str(home) + '/fname.ext.gz' From 54ad8596b39777cda2d62f6d5a4233d0314c5953 Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Fri, 29 Sep 2023 12:38:24 -0400 Subject: [PATCH 366/702] Update nibabel/tests/test_filename_parser.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_filename_parser.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 963f7cc624..f37b3713b8 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -141,7 +141,3 @@ def test__stringify_path(): assert res == 'fname.ext.gz' res = _stringify_path(pathlib.Path('../fname.ext.gz')) assert res == '../fname.ext.gz' - - - - From 6bf8c890cbd2eb7afc1c68d4fa64bfbcbcc23859 Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Tue, 3 Oct 2023 15:23:08 -0400 Subject: [PATCH 367/702] Assert equal pathlib.Path instead of str --- nibabel/freesurfer/tests/test_mghformat.py | 3 ++- nibabel/tests/test_ecat.py | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index ded1aca8a2..5a400119ba 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -10,6 +10,7 @@ import io import os +import pathlib import numpy as np import pytest @@ -291,7 +292,7 @@ def test_mgh_load_fileobj(): # pass the filename to the array proxy, please feel free to change this # test. 
img = MGHImage.load(MGZ_FNAME) - assert img.dataobj.file_like == MGZ_FNAME + assert pathlib.Path(img.dataobj.file_like) == pathlib.Path(MGZ_FNAME) # Check fileobj also passed into dataobj with ImageOpener(MGZ_FNAME) as fobj: contents = fobj.read() diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index ff74b7b084..c8de98c2d1 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -8,6 +8,7 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import os +import pathlib import warnings from unittest import TestCase @@ -183,8 +184,8 @@ class TestEcatImage(TestCase): img = image_class.load(example_file) def test_file(self): - assert self.img.file_map['header'].filename == self.example_file - assert self.img.file_map['image'].filename == self.example_file + assert pathlib.Path(self.img.file_map['header'].filename) == pathlib.Path(self.example_file) + assert pathlib.Path(self.img.file_map['image'].filename) == pathlib.Path(self.example_file) def test_save(self): tmp_file = 'tinypet_tmp.v' From 77aca5f6c2345ea6c3cbbcd9f0c4a137271a4676 Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Wed, 4 Oct 2023 12:34:17 -0400 Subject: [PATCH 368/702] Update nibabel/tests/test_filename_parser.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_filename_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index f37b3713b8..735529e713 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -135,7 +135,7 @@ def test__stringify_path(): home = pathlib.Path.home().as_posix() res = _stringify_path(pathlib.Path('~/fname.ext.gz')) - assert res == str(home) + '/fname.ext.gz' + assert res == f'{home}/fname.ext.gz' res = _stringify_path(pathlib.Path('./fname.ext.gz')) assert res == 'fname.ext.gz' From cca2b4acad5166e21abd18100300dfc053096647 Mon Sep 17 00:00:00 2001 From: Reinder Vos de Wael Date: Wed, 4 Oct 2023 12:34:39 -0400 Subject: [PATCH 369/702] Update nibabel/tests/test_filename_parser.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_filename_parser.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 735529e713..736994e0da 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -127,7 +127,6 @@ def test_splitext_addext(): def test__stringify_path(): - current_directory = pathlib.Path.cwd().as_posix() res = _stringify_path('fname.ext.gz') assert res == 'fname.ext.gz' res = _stringify_path(pathlib.Path('fname.ext.gz')) From 9c146946a0afac7e4e04ec2a2687e693fed0ad31 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 08:40:53 -0400 Subject: [PATCH 370/702] CI: Add 3.12 to the stable test matrix --- .github/workflows/stable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 18a30d6d07..90721bc81b 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -105,7 +105,7 @@ jobs: strategy: matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: [3.8, 3.9, "3.10", "3.11"] + python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] architecture: ['x64', 'x86'] install: ['pip'] check: ['test'] From ec10d70e4a182c60efde30929d7a0de1efc3f5bf Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 08:52:17 -0400 Subject: 
[PATCH 371/702] NEP29+1y: Bump minimum numpy --- doc/source/installation.rst | 2 +- min-requirements.txt | 2 +- pyproject.toml | 2 +- requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/installation.rst b/doc/source/installation.rst index b896d2dfc1..4f747e7feb 100644 --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -84,7 +84,7 @@ Requirements .. check these against pyproject.toml * Python_ 3.8 or greater -* NumPy_ 1.19 or greater +* NumPy_ 1.20 or greater * Packaging_ 17.0 or greater * importlib-resources_ 1.3 or greater (or Python 3.9+) * SciPy_ (optional, for full SPM-ANALYZE support) diff --git a/min-requirements.txt b/min-requirements.txt index e30bc40a2a..1cdd78bb79 100644 --- a/min-requirements.txt +++ b/min-requirements.txt @@ -1,4 +1,4 @@ # Auto-generated by tools/update_requirements.py -numpy ==1.19 +numpy ==1.20 packaging ==17 importlib_resources ==1.3; python_version < '3.9' diff --git a/pyproject.toml b/pyproject.toml index 1dbc13b43f..d399ca7d68 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ readme = "README.rst" license = { text = "MIT License" } requires-python = ">=3.8" dependencies = [ - "numpy >=1.19", + "numpy >=1.20", "packaging >=17", "importlib_resources >=1.3; python_version < '3.9'", ] diff --git a/requirements.txt b/requirements.txt index a74639cf81..f74ccc0850 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ # Auto-generated by tools/update_requirements.py -numpy >=1.19 +numpy >=1.20 packaging >=17 importlib_resources >=1.3; python_version < '3.9' From df2377a775fece5338d159eef23abdb057d50df6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 09:21:10 -0400 Subject: [PATCH 372/702] MNT: Update tox config to what we actually do --- tox.ini | 75 +++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 62 insertions(+), 13 deletions(-) diff --git a/tox.ini b/tox.ini index a0002e12b6..8bdcf5b495 100644 --- a/tox.ini +++ b/tox.ini @@ -1,20 +1,69 @@ [tox] -# From-scratch tox-default-name virtualenvs -envlist = py25,py26,py27,py32 +requires = + tox>=4 +envlist = py38,py39,py310,py311,py312,doctest,style,typecheck + [testenv] +description = Typical pytest invocation with coverage deps = - nose - numpy -commands=nosetests --with-doctest -# MBs virtualenvs; numpy, nose already installed. 
Run these with: -# tox -e python25,python26,python27,python32,np-1.2.1 -[testenv:python25] + pytest + pytest-doctestplus + pytest-cov + pytest-httpserver + pytest-xdist +commands = + pytest --doctest-modules --doctest-plus \ + --cov nibabel --cov-report xml:cov.xml \ + --junitxml test-results.xml \ + --pyargs nibabel {posargs:-n auto} + +[testenv:doctest] +description = Typical pytest invocation with coverage +allowlist_externals = make deps = -[testenv:python26] + sphinx + pytest + matplotlib>=1.5.3 + numpydoc + texext + tomli; python_version < "3.11" +commands = + make -C doc html + make -C doc doctest + +[testenv:style] +description = Check our style guide deps = -[testenv:python27] + flake8 + blue + isort[colors] +skip_install = true +commands = + blue --diff --color nibabel + isort --diff --color nibabel + flake8 nibabel + +[testenv:style-fix] +description = Auto-apply style guide to the extent possible deps = -[testenv:python32] -deps = -[testenv:np-1.2.1] + blue + isort[colors] +skip_install = true +commands = + blue nibabel + isort nibabel + +[testenv:typecheck] +description = Check type consistency deps = + mypy + pytest + types-setuptools + types-Pillow + pydicom + numpy + pyzstd + importlib_resources +skip_install = true +commands = + mypy nibabel From 4a02317e14f8e769ba2fbeca53db1d6fcab9e1c5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 09:30:54 -0400 Subject: [PATCH 373/702] STY: Apply style fixes --- nibabel/__init__.py | 3 ++- nibabel/minc2.py | 1 + nibabel/tests/test_ecat.py | 6 +++--- nibabel/tests/test_filename_parser.py | 8 +++++++- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 09be1d2792..db427435ae 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -39,8 +39,9 @@ # module imports from . import analyze as ana -from . import ecat, imagestats, mriutils, orientations +from . import ecat, imagestats, mriutils from . import nifti1 as ni1 +from . import orientations from . import spm2analyze as spm2 from . import spm99analyze as spm99 from . 
import streamlines, viewers diff --git a/nibabel/minc2.py b/nibabel/minc2.py index d02eb6cefc..3096ef9499 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -26,6 +26,7 @@ mincstats my_funny.mnc """ import warnings + import numpy as np from .minc1 import Minc1File, Minc1Image, MincError, MincHeader diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index c8de98c2d1..6a076cbc38 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -8,8 +8,8 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import os -import pathlib import warnings +from pathlib import Path from unittest import TestCase import numpy as np @@ -184,8 +184,8 @@ class TestEcatImage(TestCase): img = image_class.load(example_file) def test_file(self): - assert pathlib.Path(self.img.file_map['header'].filename) == pathlib.Path(self.example_file) - assert pathlib.Path(self.img.file_map['image'].filename) == pathlib.Path(self.example_file) + assert Path(self.img.file_map['header'].filename) == Path(self.example_file) + assert Path(self.img.file_map['image'].filename) == Path(self.example_file) def test_save(self): tmp_file = 'tinypet_tmp.v' diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 736994e0da..5d352f72dd 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -11,7 +11,13 @@ import pytest -from ..filename_parser import TypesFilenamesError, parse_filename, splitext_addext, types_filenames, _stringify_path +from ..filename_parser import ( + TypesFilenamesError, + _stringify_path, + parse_filename, + splitext_addext, + types_filenames, +) def test_filenames(): From 79889f8a6825cc5a8baa6912c838bbfcc4ba109c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 09:43:44 -0400 Subject: [PATCH 374/702] TOX: Add -pre environments --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 8bdcf5b495..6ce10286c6 100644 --- a/tox.ini +++ b/tox.ini @@ -1,10 +1,12 @@ [tox] requires = tox>=4 -envlist = py38,py39,py310,py311,py312,doctest,style,typecheck +envlist = py3{8,9,10,11,12}{,-pre},doctest,style,typecheck [testenv] description = Typical pytest invocation with coverage +pip_pre = + py3{8,9,10,11,12}-pre: true deps = pytest pytest-doctestplus From 95af765d226f01a4a94d5bd8f9ffbc31213e5adc Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 09:44:57 -0400 Subject: [PATCH 375/702] TOX: Split doc build from doctest --- tox.ini | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 6ce10286c6..e5c7746fe4 100644 --- a/tox.ini +++ b/tox.ini @@ -19,9 +19,22 @@ commands = --junitxml test-results.xml \ --pyargs nibabel {posargs:-n auto} +[testenv:docs] +description = Typical pytest invocation with coverage +allowlist_externals = make +deps = + sphinx + matplotlib>=1.5.3 + numpydoc + texext + tomli; python_version < "3.11" +commands = + make -C doc html + [testenv:doctest] description = Typical pytest invocation with coverage allowlist_externals = make +depends = docs deps = sphinx pytest @@ -30,7 +43,6 @@ deps = texext tomli; python_version < "3.11" commands = - make -C doc html make -C doc doctest [testenv:style] From 5cb6824b7393bf32c808738343657b8d2342db86 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 10:00:47 -0400 Subject: [PATCH 376/702] TOX: Add build/publish environments --- tox.ini | 19 +++++++++++++++++++ 1 file changed, 
19 insertions(+)

diff --git a/tox.ini b/tox.ini
index e5c7746fe4..d42a1cee27 100644
--- a/tox.ini
+++ b/tox.ini
@@ -81,3 +81,22 @@ deps =
 skip_install = true
 commands =
     mypy nibabel
+
+[testenv:build]
+deps =
+    build
+    twine
+skip_install = true
+set_env =
+    PYTHONWARNINGS=error
+commands =
+    python -m build
+    python -m twine check dist/*
+
+[testenv:publish]
+depends = build
+deps =
+    twine
+skip_install = true
+commands =
+    python -m twine upload dist/*

From 54fa30e984207d9e85b25f102f9b5c9b6ee143cb Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 6 Oct 2023 11:16:29 -0400
Subject: [PATCH 377/702] MNT: Pacify build warnings

---
 pyproject.toml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index d399ca7d68..7b774980ef 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -99,10 +99,19 @@ exclude = [
 
 [tool.hatch.version]
 source = "vcs"
+tag-pattern = '(?P<version>\d+(?:\.\d+){0,2}[^+]*)(?:\+.*)?$'
 raw-options = { version_scheme = "release-branch-semver" }
 
 [tool.hatch.build.hooks.vcs]
 version-file = "nibabel/_version.py"
+# Old default setuptools_scm template; hatch-vcs currently causes
+# a noisy warning if template is missing.
+template = '''
+# file generated by setuptools_scm
+# don't change, don't track in version control
+__version__ = version = {version!r}
+__version_tuple__ = version_tuple = {version_tuple!r}
+'''

From 51a7384c8f4b948d6a98321d0dd52d4da52be294 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 6 Oct 2023 11:16:43 -0400
Subject: [PATCH 378/702] MNT: Only error on build-strict

---
 tox.ini | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tox.ini b/tox.ini
index d42a1cee27..1b73eced0a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -82,13 +82,13 @@ skip_install = true
 commands =
     mypy nibabel
 
-[testenv:build]
+[testenv:build{,-strict}]
 deps =
     build
     twine
 skip_install = true
 set_env =
-    PYTHONWARNINGS=error
+    build-strict: PYTHONWARNINGS=error
 commands =
     python -m build
     python -m twine check dist/*

From 4fb412f05528a6966b6ac5ddd9c75b3cfd55e0c9 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 6 Oct 2023 11:17:41 -0400
Subject: [PATCH 379/702] CI: Update to latest checkout

---
 .github/workflows/stable.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml
index 90721bc81b..3e51553f46 100644
--- a/.github/workflows/stable.yml
+++ b/.github/workflows/stable.yml
@@ -35,7 +35,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v4
@@ -157,7 +157,7 @@ jobs:
      EXTRA_PIP_FLAGS: ${{ matrix.pip-flags }}

    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          submodules: recursive
          fetch-depth: 0

From 55309c0a2bd261b5bdaa5ef62a057aec53c52ef4 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 6 Oct 2023 11:58:28 -0400
Subject: [PATCH 380/702] TOX: Encode minimum versions in dependencies, add -min and -full flags

---
 tox.ini | 36 +++++++++++++++++++++++++++++++++---
 1 file changed, 33 insertions(+), 3 deletions(-)

diff --git a/tox.ini b/tox.ini
index 1b73eced0a..c2b0b81cb8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,18 +1,48 @@
 [tox]
 requires =
-    tox>=4
-envlist = py3{8,9,10,11,12}{,-pre},doctest,style,typecheck
+    tox>=4
+envlist =
+    py38{,-min,-full}
+    py3{9,10}-full
+    py3{11,12}{,-pre,-full}
+    doctest
+    style
+    typecheck
+skip_missing_interpreters = true
 
 [testenv]
 description = Typical
pytest invocation with coverage pip_pre = - py3{8,9,10,11,12}-pre: true + pre: true deps = pytest pytest-doctestplus pytest-cov pytest-httpserver pytest-xdist + # NEP29/SPEC0 + 1yr: Test on minor release series within the last 3 years + # We're extending this to all optional dependencies + # This only affects the range that we test on; numpy is the only non-optional + # dependency, and will be the only one to affect pip environment resolution. + min: numpy ==1.20 + min: packaging ==17 + min: importlib_resources ==1.3; python_version < '3.9' + min: scipy ==1.6 + min: matplotlib ==3.4 + min: pillow ==8.1 + min: h5py ==3.0 + min: indexed_gzip ==1.4 + min: pyzstd ==0.13 + full,pre: scipy >=1.6 + full,pre: matplotlib >=3.4 + full,pre: pillow >=8.1 + full,pre: h5py >=3.0 + full,pre: indexed_gzip >=1.4 + full,pre: pyzstd >=0.13 + min: pydicom ==2.1 + full: pydicom >=2.1 + pre: pydicom @ git+https://github.com/pydicom/pydicom.git@master + commands = pytest --doctest-modules --doctest-plus \ --cov nibabel --cov-report xml:cov.xml \ From 22e74723b8ed84452017cb957865ff0aef9fa16c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 12:04:48 -0400 Subject: [PATCH 381/702] Update minimum pyzstd, add zenodo and pre-release environments --- tox.ini | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index c2b0b81cb8..ed76f0a0da 100644 --- a/tox.ini +++ b/tox.ini @@ -32,13 +32,13 @@ deps = min: pillow ==8.1 min: h5py ==3.0 min: indexed_gzip ==1.4 - min: pyzstd ==0.13 + min: pyzstd ==0.14.3 full,pre: scipy >=1.6 full,pre: matplotlib >=3.4 full,pre: pillow >=8.1 full,pre: h5py >=3.0 full,pre: indexed_gzip >=1.4 - full,pre: pyzstd >=0.13 + full,pre: pyzstd >=0.14.3 min: pydicom ==2.1 full: pydicom >=2.1 pre: pydicom @ git+https://github.com/pydicom/pydicom.git@master @@ -130,3 +130,15 @@ deps = skip_install = true commands = python -m twine upload dist/* + +[testenv:zenodo] +deps = gitpython +skip_install = true +commands = + python tools/prep_zenodo.py + +[testenv:pre-release] +depends = + zenodo + style-fix + build From 98b7bb90c156d614ca4165fddf99458137a5596c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 12:06:37 -0400 Subject: [PATCH 382/702] MNT: Convert dev optional dependencies to tox --- pyproject.toml | 37 +++++++++---------------------------- 1 file changed, 9 insertions(+), 28 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7b774980ef..7ae7dbda1f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,39 +49,20 @@ nib-roi = "nibabel.cmdline.roi:main" parrec2nii = "nibabel.cmdline.parrec2nii:main" [project.optional-dependencies] -all = ["nibabel[dicomfs,dev,doc,minc2,spm,style,test,zstd]"] -dev = ["gitpython", "twine", "nibabel[style]"] +all = ["nibabel[dicomfs,minc2,spm,zstd]"] +# Features dicom = ["pydicom >=1.0.0"] dicomfs = ["nibabel[dicom]", "pillow"] -doc = [ - "matplotlib >= 1.5.3", - "numpydoc", - "sphinx ~= 5.3", - "texext", - "tomli; python_version < \"3.11\"", -] -doctest = ["nibabel[doc,test]"] minc2 = ["h5py"] spm = ["scipy"] -style = ["flake8", "blue", "isort"] -test = [ - "coverage", - "pytest !=5.3.4", - "pytest-cov", - "pytest-doctestplus", - "pytest-httpserver", - "pytest-xdist", -] -typing = [ - "mypy", - "importlib_resources", - "pydicom", - "pytest", - "pyzstd", - "types-setuptools", - "types-Pillow", -] zstd = ["pyzstd >= 0.14.3"] +# Dev dependencies: Move to tox, keep aliases to avoid breaking workflows +dev = ["tox"] +doc = ["tox"] +doctest = ["tox"] +style = ["tox"] 
+test = ["tox"] +typing = ["tox"] [tool.hatch.build.targets.sdist] exclude = [ From 4b6adaae8c4b26fae65007d5a4f70a3c99da955f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 12:40:05 -0400 Subject: [PATCH 383/702] CI: Convert pre-release CI jobs to tox --- .github/workflows/pre-release.yml | 45 +++++++------------------------ tox.ini | 36 +++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 37 deletions(-) diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 4431c7135f..e55787bd2a 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -35,20 +35,7 @@ jobs: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] python-version: ["3.9", "3.10", "3.11", "3.12"] architecture: ['x64', 'x86'] - install: ['pip'] - check: ['test'] - pip-flags: ['PRE_PIP_FLAGS'] - depends: ['REQUIREMENTS'] - optional-depends: ['DEFAULT_OPT_DEPENDS'] - include: - # Pydicom master - - os: ubuntu-latest - python-version: "3.11" - install: pip - check: test - pip-flags: '' - depends: REQUIREMENTS - optional-depends: PYDICOM_MASTER + dependencies: ['pre'] exclude: - os: ubuntu-latest architecture: x86 @@ -57,13 +44,6 @@ jobs: - python-version: '3.12' architecture: x86 - env: - DEPENDS: ${{ matrix.depends }} - OPTIONAL_DEPENDS: ${{ matrix.optional-depends }} - INSTALL_TYPE: ${{ matrix.install }} - CHECK_TYPE: ${{ matrix.check }} - EXTRA_PIP_FLAGS: ${{ matrix.pip-flags }} - steps: - uses: actions/checkout@v3 with: @@ -77,19 +57,14 @@ jobs: allow-prereleases: true - name: Display Python version run: python -c "import sys; print(sys.version)" - - name: Create virtual environment - run: tools/ci/create_venv.sh - - name: Build archive + - name: Install tox run: | - source tools/ci/build_archive.sh - echo "ARCHIVE=$ARCHIVE" >> $GITHUB_ENV - - name: Install dependencies - run: tools/ci/install_dependencies.sh - - name: Install NiBabel - run: tools/ci/install.sh - - name: Run tests - run: tools/ci/check.sh - if: ${{ matrix.check != 'skiptests' }} + python -m pip install --upgrade pip + python -m pip install tox tox-gh-actions + - name: Run tox + run: tox + env: + DEPENDS: ${{ matrix.dependencies }} - uses: codecov/codecov-action@v3 if: ${{ always() }} with: @@ -98,5 +73,5 @@ jobs: uses: actions/upload-artifact@v3 with: name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} - path: for_testing/test-results.xml - if: ${{ always() && matrix.check == 'test' }} + path: test-results.xml + if: ${{ always() }} diff --git a/tox.ini b/tox.ini index ed76f0a0da..0759eaf8a1 100644 --- a/tox.ini +++ b/tox.ini @@ -10,8 +10,32 @@ envlist = typecheck skip_missing_interpreters = true +[gh-actions] +python = + 3.8: py38 + 3.9: py39 + 3.10: py310 + 3.11: py311 + 3.12: py312 + +[gh-actions:env] +DEPENDS = + pre: pre + full: full + min: min + +CHECK = + build: build + doctest: doctest + style: style + typecheck: typecheck + [testenv] description = Typical pytest invocation with coverage +install_command = + python -I -m pip install \ + --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ + {opts} {packages} pip_pre = pre: true deps = @@ -33,6 +57,7 @@ deps = min: h5py ==3.0 min: indexed_gzip ==1.4 min: pyzstd ==0.14.3 + pre: numpy <2.0.dev0 full,pre: scipy >=1.6 full,pre: matplotlib >=3.4 full,pre: pillow >=8.1 @@ -40,8 +65,8 @@ deps = full,pre: indexed_gzip >=1.4 full,pre: pyzstd >=0.14.3 min: pydicom ==2.1 - full: pydicom >=2.1 - pre: pydicom @ git+https://github.com/pydicom/pydicom.git@master + full,pre: 
pydicom >=2.1 + # pre: pydicom @ git+https://github.com/pydicom/pydicom.git@main commands = pytest --doctest-modules --doctest-plus \ @@ -49,6 +74,13 @@ commands = --junitxml test-results.xml \ --pyargs nibabel {posargs:-n auto} +[testenv:install] +description = "Install and verify imports succeed" +deps = +install_command = python -I -m pip install {opts} {packages} +commands = + python -c "import nibabel; print(nibabel.__version__)" + [testenv:docs] description = Typical pytest invocation with coverage allowlist_externals = make From e905ef1571da965d610e1c9225a3b3c59be88f00 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 13:09:59 -0400 Subject: [PATCH 384/702] CI: Convert stable CI to tox --- .github/workflows/stable.yml | 69 +++++++++--------------------------- tox.ini | 10 +++++- 2 files changed, 25 insertions(+), 54 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 3e51553f46..9a71ce30e0 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -94,8 +94,8 @@ jobs: if: matrix.package == 'archive' run: pip install archive/nibabel-archive.tgz - run: python -c 'import nibabel; print(nibabel.__version__)' - - name: Install test extras - run: pip install nibabel[test] + - name: Install minimum test dependencies + run: pip install pytest pytest-doctest-plus - name: Run tests run: pytest --doctest-modules --doctest-plus -v --pyargs nibabel @@ -107,57 +107,24 @@ jobs: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] architecture: ['x64', 'x86'] - install: ['pip'] - check: ['test'] - pip-flags: [''] - depends: ['REQUIREMENTS'] - optional-depends: ['DEFAULT_OPT_DEPENDS'] + dependencies: ['full'] include: # Basic dependencies only - os: ubuntu-latest python-version: 3.8 - install: pip - check: test - pip-flags: '' - depends: REQUIREMENTS - optional-depends: '' + dependencies: '' # Absolute minimum dependencies - os: ubuntu-latest python-version: 3.8 - install: pip - check: test - pip-flags: '' - depends: MIN_REQUIREMENTS - optional-depends: '' - # Absolute minimum dependencies plus old MPL, Pydicom, Pillow - - os: ubuntu-latest - python-version: 3.8 - install: pip - check: test - pip-flags: '' - depends: MIN_REQUIREMENTS - optional-depends: MIN_OPT_DEPENDS - # Clean install imports only with package-declared dependencies - - os: ubuntu-latest - python-version: 3.8 - install: pip - check: skiptests - pip-flags: '' - depends: '' + dependencies: 'min' exclude: - os: ubuntu-latest architecture: x86 - os: macos-latest architecture: x86 - env: - DEPENDS: ${{ matrix.depends }} - OPTIONAL_DEPENDS: ${{ matrix.optional-depends }} - INSTALL_TYPE: ${{ matrix.install }} - CHECK_TYPE: ${{ matrix.check }} - EXTRA_PIP_FLAGS: ${{ matrix.pip-flags }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v3 with: submodules: recursive fetch-depth: 0 @@ -166,31 +133,27 @@ jobs: with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} + allow-prereleases: true - name: Display Python version run: python -c "import sys; print(sys.version)" - - name: Create virtual environment - run: tools/ci/create_venv.sh - - name: Build archive + - name: Install tox run: | - source tools/ci/build_archive.sh - echo "ARCHIVE=$ARCHIVE" >> $GITHUB_ENV - - name: Install dependencies - run: tools/ci/install_dependencies.sh - - name: Install NiBabel - run: tools/ci/install.sh - - name: Run tests - if: ${{ matrix.check != 'skiptests' }} - run: 
tools/ci/check.sh + python -m pip install --upgrade pip + python -m pip install tox tox-gh-actions + - name: Run tox + run: tox + env: + DEPENDS: ${{ matrix.dependencies }} - uses: codecov/codecov-action@v3 if: ${{ always() }} with: files: cov.xml - name: Upload pytest test results - if: ${{ always() && matrix.check == 'test' }} uses: actions/upload-artifact@v3 with: name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} - path: for_testing/test-results.xml + path: test-results.xml + if: ${{ always() }} publish: runs-on: ubuntu-latest diff --git a/tox.ini b/tox.ini index 0759eaf8a1..e62997298c 100644 --- a/tox.ini +++ b/tox.ini @@ -38,6 +38,12 @@ install_command = {opts} {packages} pip_pre = pre: true +# getpass.getuser() sources for Windows: +pass_env = + LOGNAME + USER + LNAME + USERNAME deps = pytest pytest-doctestplus @@ -61,7 +67,9 @@ deps = full,pre: scipy >=1.6 full,pre: matplotlib >=3.4 full,pre: pillow >=8.1 - full,pre: h5py >=3.0 + full: h5py >=3.0 + # h5py missing 3.12 wheels, so disable from pre for now + # full,pre: h5py >=3.0 full,pre: indexed_gzip >=1.4 full,pre: pyzstd >=0.14.3 min: pydicom ==2.1 From 7b2a0c3e4d43b34055e67c06bbc72c024cfe2f34 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 13:22:02 -0400 Subject: [PATCH 385/702] CI: Exclude 3.12 on x86, skip full dependencies for 3.12 for now --- .github/workflows/stable.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 9a71ce30e0..79e081aafc 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -105,7 +105,7 @@ jobs: strategy: matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] + python-version: [3.8, 3.9, "3.10", "3.11"] # Waiting on H5Py: , "3.12"] architecture: ['x64', 'x86'] dependencies: ['full'] include: @@ -117,11 +117,22 @@ jobs: - os: ubuntu-latest python-version: 3.8 dependencies: 'min' + - os: ubuntu-latest + python-version: 3.12 + dependencies: '' + - os: windows-latest + python-version: 3.12 + dependencies: '' + - os: macos-latest + python-version: 3.12 + dependencies: '' exclude: - os: ubuntu-latest architecture: x86 - os: macos-latest architecture: x86 + - python-version: '3.12' + architecture: x86 steps: - uses: actions/checkout@v3 From 8fb7abb7781887c8daefcf00d37c82536f569ba0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 13:22:36 -0400 Subject: [PATCH 386/702] CI: Run miscellaneous checks through tox --- .github/workflows/misc.yml | 40 +++++++++----------------------------- 1 file changed, 9 insertions(+), 31 deletions(-) diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 90645b40eb..556d08a339 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -22,17 +22,7 @@ jobs: continue-on-error: true strategy: matrix: - python-version: ["3.10"] - install: ['pip'] - check: ['style', 'doctest', 'typing'] - pip-flags: [''] - depends: ['REQUIREMENTS'] - env: - DEPENDS: ${{ matrix.depends }} - OPTIONAL_DEPENDS: ${{ matrix.optional-depends }} - INSTALL_TYPE: ${{ matrix.install }} - CHECK_TYPE: ${{ matrix.check }} - EXTRA_PIP_FLAGS: ${{ matrix.pip-flags }} + check: ['style', 'doctest', 'typecheck'] steps: - uses: actions/checkout@v3 @@ -42,26 +32,14 @@ jobs: - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: - python-version: ${{ matrix.python-version }} - architecture: ${{ matrix.architecture }} 
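+        # A bare major version is a semver range for actions/setup-python,
+        # so "3" resolves to the newest stable CPython 3.x on the runner
+        # (assumed v4 behavior); these checks do not need a pinned minor.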
+ python-version: 3 - name: Display Python version run: python -c "import sys; print(sys.version)" - - name: Create virtual environment - run: tools/ci/create_venv.sh - - name: Build archive + - name: Install tox run: | - source tools/ci/build_archive.sh - echo "ARCHIVE=$ARCHIVE" >> $GITHUB_ENV - - name: Install dependencies - run: tools/ci/install_dependencies.sh - - name: Install NiBabel - run: tools/ci/install.sh - - name: Run tests - run: tools/ci/check.sh - if: ${{ matrix.check != 'skiptests' }} - - name: Upload pytest test results - uses: actions/upload-artifact@v3 - with: - name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} - path: for_testing/test-results.xml - if: ${{ always() && matrix.check == 'test' }} + python -m pip install --upgrade pip + python -m pip install tox tox-gh-actions + - name: Run tox + run: tox + env: + CHECK: ${{ matrix.check }} From 20d3f14a3c5eae4afed2f3d1fc89274f5ec94487 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 13:29:36 -0400 Subject: [PATCH 387/702] MNT: Require wheels for things that cannot be built on CI --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index e62997298c..e7da703309 100644 --- a/tox.ini +++ b/tox.ini @@ -34,6 +34,7 @@ CHECK = description = Typical pytest invocation with coverage install_command = python -I -m pip install \ + --only-binary numpy,scipy,h5py --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ {opts} {packages} pip_pre = From 4de4db40a2713d149d8aa4d67aed18667ef30ada Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 13:30:21 -0400 Subject: [PATCH 388/702] CI: Do not fail fast --- .github/workflows/stable.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 79e081aafc..6bfacfb9c4 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -103,6 +103,7 @@ jobs: # Check each OS, all supported Python, minimum versions and latest releases runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] python-version: [3.8, 3.9, "3.10", "3.11"] # Waiting on H5Py: , "3.12"] From 5cbf0355cd67c415b926bd99d4ebcf708d875f17 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 13:33:58 -0400 Subject: [PATCH 389/702] CI: Just run tox directly for miscellaneous checks --- .github/workflows/misc.yml | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 556d08a339..2acc944370 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -26,20 +26,11 @@ jobs: steps: - uses: actions/checkout@v3 - with: - submodules: recursive - fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: 3 - name: Display Python version run: python -c "import sys; print(sys.version)" - - name: Install tox - run: | - python -m pip install --upgrade pip - python -m pip install tox tox-gh-actions - - name: Run tox - run: tox - env: - CHECK: ${{ matrix.check }} + - name: Run check + run: pipx run tox -e ${{ matrix.check }} From 5877467403460a3743d28545fd1bc9973b92c1f6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 13:36:54 -0400 Subject: [PATCH 390/702] MNT: Push h5py support back a bit --- tox.ini | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 
e7da703309..1e2b17d073 100644 --- a/tox.ini +++ b/tox.ini @@ -61,14 +61,17 @@ deps = min: scipy ==1.6 min: matplotlib ==3.4 min: pillow ==8.1 - min: h5py ==3.0 + min: h5py ==2.10 min: indexed_gzip ==1.4 min: pyzstd ==0.14.3 + # Numpy 2.0 is a major breaking release; we cannot put much effort into + # supporting until it's at least RC stable pre: numpy <2.0.dev0 full,pre: scipy >=1.6 full,pre: matplotlib >=3.4 full,pre: pillow >=8.1 - full: h5py >=3.0 + # Exception: h5py 3.0.0 dropped win32 wheels, so extend back a little further + full: h5py >=2.10 # h5py missing 3.12 wheels, so disable from pre for now # full,pre: h5py >=3.0 full,pre: indexed_gzip >=1.4 From 69ec580b2f3d98fb6db2d740db11ac1befea0135 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 6 Oct 2023 13:38:23 -0400 Subject: [PATCH 391/702] MNT: Drop tools/ci for tox --- tools/ci/activate.sh | 9 -------- tools/ci/build_archive.sh | 31 ------------------------- tools/ci/check.sh | 36 ----------------------------- tools/ci/create_venv.sh | 24 -------------------- tools/ci/env.sh | 17 -------------- tools/ci/install.sh | 39 -------------------------------- tools/ci/install_dependencies.sh | 32 -------------------------- 7 files changed, 188 deletions(-) delete mode 100644 tools/ci/activate.sh delete mode 100755 tools/ci/build_archive.sh delete mode 100755 tools/ci/check.sh delete mode 100755 tools/ci/create_venv.sh delete mode 100644 tools/ci/env.sh delete mode 100755 tools/ci/install.sh delete mode 100755 tools/ci/install_dependencies.sh diff --git a/tools/ci/activate.sh b/tools/ci/activate.sh deleted file mode 100644 index 567e13a67b..0000000000 --- a/tools/ci/activate.sh +++ /dev/null @@ -1,9 +0,0 @@ -if [ -e virtenv/bin/activate ]; then - source virtenv/bin/activate -elif [ -e virtenv/Scripts/activate ]; then - source virtenv/Scripts/activate -else - echo Cannot activate virtual environment - ls -R virtenv - false -fi diff --git a/tools/ci/build_archive.sh b/tools/ci/build_archive.sh deleted file mode 100755 index 3c25012e1b..0000000000 --- a/tools/ci/build_archive.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -echo "Building archive" - -source tools/ci/activate.sh - -set -eu - -# Required dependencies -echo "INSTALL_TYPE = $INSTALL_TYPE" - -set -x - -if [ "$INSTALL_TYPE" = "sdist" -o "$INSTALL_TYPE" = "wheel" ]; then - python -m build -elif [ "$INSTALL_TYPE" = "archive" ]; then - ARCHIVE="/tmp/package.tar.gz" - git archive -o $ARCHIVE HEAD -fi - -if [ "$INSTALL_TYPE" = "sdist" ]; then - ARCHIVE=$( ls $PWD/dist/*.tar.gz ) -elif [ "$INSTALL_TYPE" = "wheel" ]; then - ARCHIVE=$( ls $PWD/dist/*.whl ) -elif [ "$INSTALL_TYPE" = "pip" ]; then - ARCHIVE="$PWD" -fi - -export ARCHIVE - -set +eux diff --git a/tools/ci/check.sh b/tools/ci/check.sh deleted file mode 100755 index cd90650722..0000000000 --- a/tools/ci/check.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -echo Running tests - -source tools/ci/activate.sh - -set -eu - -# Required variables -echo CHECK_TYPE = $CHECK_TYPE - -set -x - -export NIBABEL_DATA_DIR="$PWD/nibabel-data" - -if [ "${CHECK_TYPE}" == "style" ]; then - # Run styles only on core nibabel code. - flake8 nibabel -elif [ "${CHECK_TYPE}" == "doctest" ]; then - make -C doc html && make -C doc doctest -elif [ "${CHECK_TYPE}" == "test" ]; then - # Change into an innocuous directory and find tests from installation - mkdir for_testing - cd for_testing - cp ../.coveragerc . 
- pytest --doctest-modules --doctest-plus --cov nibabel --cov-report xml:../cov.xml \ - --junitxml=test-results.xml -v --pyargs nibabel -n auto -elif [ "${CHECK_TYPE}" == "typing" ]; then - mypy nibabel -else - false -fi - -set +eux - -echo Done running tests diff --git a/tools/ci/create_venv.sh b/tools/ci/create_venv.sh deleted file mode 100755 index 7a28767396..0000000000 --- a/tools/ci/create_venv.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -echo Creating isolated virtual environment - -source tools/ci/env.sh - -set -eu - -# Required variables -echo SETUP_REQUIRES = $SETUP_REQUIRES - -set -x - -python -m pip install --upgrade pip virtualenv -virtualenv --python=python virtenv -source tools/ci/activate.sh -python --version -python -m pip install -U $SETUP_REQUIRES -which python -which pip - -set +eux - -echo Done creating isolated virtual environment diff --git a/tools/ci/env.sh b/tools/ci/env.sh deleted file mode 100644 index dd29443126..0000000000 --- a/tools/ci/env.sh +++ /dev/null @@ -1,17 +0,0 @@ -SETUP_REQUIRES="pip build" - -# Minimum requirements -REQUIREMENTS="-r requirements.txt" -# Minimum versions of minimum requirements -MIN_REQUIREMENTS="-r min-requirements.txt" - -DEFAULT_OPT_DEPENDS="scipy matplotlib pillow pydicom h5py indexed_gzip pyzstd" -# pydicom has skipped some important pre-releases, so enable a check against master -PYDICOM_MASTER="git+https://github.com/pydicom/pydicom.git@master" -# Minimum versions of optional requirements -MIN_OPT_DEPENDS="matplotlib==1.5.3 pydicom==1.0.1 pillow==2.6" - -# Numpy and scipy upload nightly/weekly/intermittent wheels -NIGHTLY_WHEELS="https://pypi.anaconda.org/scipy-wheels-nightly/simple" -STAGING_WHEELS="https://pypi.anaconda.org/multibuild-wheels-staging/simple" -PRE_PIP_FLAGS="--pre --extra-index-url $NIGHTLY_WHEELS --extra-index-url $STAGING_WHEELS" diff --git a/tools/ci/install.sh b/tools/ci/install.sh deleted file mode 100755 index c0c3b23e67..0000000000 --- a/tools/ci/install.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -echo Installing nibabel - -source tools/ci/activate.sh -source tools/ci/env.sh - -set -eu - -# Required variables -echo INSTALL_TYPE = $INSTALL_TYPE -echo CHECK_TYPE = $CHECK_TYPE -echo EXTRA_PIP_FLAGS = $EXTRA_PIP_FLAGS - -set -x - -if [ -n "$EXTRA_PIP_FLAGS" ]; then - EXTRA_PIP_FLAGS=${!EXTRA_PIP_FLAGS} -fi - -( - # Ensure installation does not depend on being in source tree - mkdir ../unversioned_install_dir - cd ../unversioned_install_dir - pip install $EXTRA_PIP_FLAGS $ARCHIVE - - # Basic import check - python -c 'import nibabel; print(nibabel.__version__)' -) - -if [ "$CHECK_TYPE" == "skiptests" ]; then - exit 0 -fi - -pip install $EXTRA_PIP_FLAGS "nibabel[$CHECK_TYPE]" - -set +eux - -echo Done installing nibabel diff --git a/tools/ci/install_dependencies.sh b/tools/ci/install_dependencies.sh deleted file mode 100755 index 2ea4a524e8..0000000000 --- a/tools/ci/install_dependencies.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -echo Installing dependencies - -source tools/ci/activate.sh -source tools/ci/env.sh - -set -eu - -# Required variables -echo EXTRA_PIP_FLAGS = $EXTRA_PIP_FLAGS -echo DEPENDS = $DEPENDS -echo OPTIONAL_DEPENDS = $OPTIONAL_DEPENDS - -set -x - -if [ -n "$EXTRA_PIP_FLAGS" ]; then - EXTRA_PIP_FLAGS=${!EXTRA_PIP_FLAGS} -fi - -if [ -n "$DEPENDS" ]; then - pip install ${EXTRA_PIP_FLAGS} --only-binary :all: ${!DEPENDS} - if [ -n "$OPTIONAL_DEPENDS" ]; then - for DEP in ${!OPTIONAL_DEPENDS}; do - pip install ${EXTRA_PIP_FLAGS} --only-binary :all: $DEP || true - done - fi -fi - 
-set +eux - -echo Done installing dependencies From f2ca9be4987c101586b88e64dd90a1ede3158a4b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 21:56:57 -0400 Subject: [PATCH 392/702] CI: Install doctestplus correctly --- .github/workflows/stable.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 6bfacfb9c4..9d489f4dab 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -95,9 +95,9 @@ jobs: run: pip install archive/nibabel-archive.tgz - run: python -c 'import nibabel; print(nibabel.__version__)' - name: Install minimum test dependencies - run: pip install pytest pytest-doctest-plus + run: pip install pytest pytest-doctestplus pytest-xdist - name: Run tests - run: pytest --doctest-modules --doctest-plus -v --pyargs nibabel + run: pytest --doctest-modules --doctest-plus -v --pyargs nibabel -n auto stable: # Check each OS, all supported Python, minimum versions and latest releases From 22e8b94bc084bfe884a0da507b2b876356e6080d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 22:14:33 -0400 Subject: [PATCH 393/702] MNT: Use none to explicitly avoid dependencies, add labels --- .github/workflows/stable.yml | 8 ++++---- tox.ini | 23 +++++++++++++++-------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 9d489f4dab..3adb060ac0 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -113,20 +113,20 @@ jobs: # Basic dependencies only - os: ubuntu-latest python-version: 3.8 - dependencies: '' + dependencies: 'none' # Absolute minimum dependencies - os: ubuntu-latest python-version: 3.8 dependencies: 'min' - os: ubuntu-latest python-version: 3.12 - dependencies: '' + dependencies: 'none' - os: windows-latest python-version: 3.12 - dependencies: '' + dependencies: 'none' - os: macos-latest python-version: 3.12 - dependencies: '' + dependencies: 'none' exclude: - os: ubuntu-latest architecture: x86 diff --git a/tox.ini b/tox.ini index 1e2b17d073..d8827bd3ae 100644 --- a/tox.ini +++ b/tox.ini @@ -2,9 +2,9 @@ requires = tox>=4 envlist = - py38{,-min,-full} + py38-{none,min,full} py3{9,10}-full - py3{11,12}{,-pre,-full} + py3{11,12}-{none,pre,full} doctest style typecheck @@ -20,6 +20,7 @@ python = [gh-actions:env] DEPENDS = + none: none pre: pre full: full min: min @@ -32,6 +33,7 @@ CHECK = [testenv] description = Typical pytest invocation with coverage +labels = test install_command = python -I -m pip install \ --only-binary numpy,scipy,h5py @@ -88,6 +90,7 @@ commands = [testenv:install] description = "Install and verify imports succeed" +labels = test deps = install_command = python -I -m pip install {opts} {packages} commands = @@ -95,6 +98,7 @@ commands = [testenv:docs] description = Typical pytest invocation with coverage +labels = docs allowlist_externals = make deps = sphinx @@ -107,6 +111,7 @@ commands = [testenv:doctest] description = Typical pytest invocation with coverage +labels = docs allowlist_externals = make depends = docs deps = @@ -121,6 +126,7 @@ commands = [testenv:style] description = Check our style guide +labels = check deps = flake8 blue @@ -133,6 +139,7 @@ commands = [testenv:style-fix] description = Auto-apply style guide to the extent possible +labels = pre-release deps = blue isort[colors] @@ -143,6 +150,7 @@ commands = [testenv:typecheck] description = Check type consistency +labels = check deps = mypy pytest @@ -157,6 
+165,9 @@ commands = mypy nibabel [testenv:build{,-strict}] +labels = + check + pre-release deps = build twine @@ -169,6 +180,7 @@ commands = [testenv:publish] depends = build +labels = release deps = twine skip_install = true @@ -177,12 +189,7 @@ commands = [testenv:zenodo] deps = gitpython +labels = pre-release skip_install = true commands = python tools/prep_zenodo.py - -[testenv:pre-release] -depends = - zenodo - style-fix - build From 574846cc189d55a56647f9c16c6fc0cdc1ea225c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 22:22:19 -0400 Subject: [PATCH 394/702] CI: Show tox config for debugging --- .github/workflows/misc.yml | 4 ++++ .github/workflows/pre-release.yml | 4 ++++ .github/workflows/stable.yml | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 2acc944370..616e246350 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -32,5 +32,9 @@ jobs: python-version: 3 - name: Display Python version run: python -c "import sys; print(sys.version)" + - name: Show tox config + run: pipx run tox c + - name: Show tox config (this call) + run: pipx run tox c -e ${{ matrix.check }} - name: Run check run: pipx run tox -e ${{ matrix.check }} diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index e55787bd2a..879221e587 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -61,6 +61,10 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install tox tox-gh-actions + - name: Show tox config + run: pipx run tox c + env: + DEPENDS: ${{ matrix.dependencies }} - name: Run tox run: tox env: diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 3adb060ac0..6da797a457 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -152,6 +152,10 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install tox tox-gh-actions + - name: Show tox config + run: pipx run tox c + env: + DEPENDS: ${{ matrix.dependencies }} - name: Run tox run: tox env: From 366ff8e9a3214445f387a624872dbce150ff1be6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 22:41:39 -0400 Subject: [PATCH 395/702] CI: Hack around h5py weirdness --- .github/workflows/pre-release.yml | 8 ++++---- .github/workflows/stable.yml | 8 ++++---- tox.ini | 16 +++++++++++----- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 879221e587..ba596979da 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -44,6 +44,10 @@ jobs: - python-version: '3.12' architecture: x86 + env: + DEPENDS: ${{ matrix.dependencies }} + ARCH: ${{ matrix.architecture }} + steps: - uses: actions/checkout@v3 with: @@ -63,12 +67,8 @@ jobs: python -m pip install tox tox-gh-actions - name: Show tox config run: pipx run tox c - env: - DEPENDS: ${{ matrix.dependencies }} - name: Run tox run: tox - env: - DEPENDS: ${{ matrix.dependencies }} - uses: codecov/codecov-action@v3 if: ${{ always() }} with: diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 6da797a457..e3c9e3b022 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -135,6 +135,10 @@ jobs: - python-version: '3.12' architecture: x86 + env: + DEPENDS: ${{ matrix.dependencies }} + ARCH: ${{ ! 
contains(['none', 'min'], matrix.dependencies) && matrix.architecture }} + steps: - uses: actions/checkout@v3 with: @@ -154,12 +158,8 @@ jobs: python -m pip install tox tox-gh-actions - name: Show tox config run: pipx run tox c - env: - DEPENDS: ${{ matrix.dependencies }} - name: Run tox run: tox - env: - DEPENDS: ${{ matrix.dependencies }} - uses: codecov/codecov-action@v3 if: ${{ always() }} with: diff --git a/tox.ini b/tox.ini index d8827bd3ae..71c0015639 100644 --- a/tox.ini +++ b/tox.ini @@ -25,6 +25,9 @@ DEPENDS = full: full min: min +ARCH = + x64: x64 + CHECK = build: build doctest: doctest @@ -63,7 +66,6 @@ deps = min: scipy ==1.6 min: matplotlib ==3.4 min: pillow ==8.1 - min: h5py ==2.10 min: indexed_gzip ==1.4 min: pyzstd ==0.14.3 # Numpy 2.0 is a major breaking release; we cannot put much effort into @@ -72,15 +74,19 @@ deps = full,pre: scipy >=1.6 full,pre: matplotlib >=3.4 full,pre: pillow >=8.1 - # Exception: h5py 3.0.0 dropped win32 wheels, so extend back a little further - full: h5py >=2.10 - # h5py missing 3.12 wheels, so disable from pre for now - # full,pre: h5py >=3.0 full,pre: indexed_gzip >=1.4 full,pre: pyzstd >=0.14.3 min: pydicom ==2.1 full,pre: pydicom >=2.1 + # pydicom master seems to be breaking things # pre: pydicom @ git+https://github.com/pydicom/pydicom.git@main + # h5py is a pain. They dropped win32 wheels at 3.0, which only supports + # thru py39. Add a special -x64 environment to limit tests to x64. We + # will exclude this environment for none/min in GitHub actions. + min: h5py ==2.10 + x64: h5py >=2.10 + # h5py missing 3.12 wheels, so disable from pre for now + # pre: h5py >=2.10 commands = pytest --doctest-modules --doctest-plus \ From e77199a74e9f91edbfb259f1bf7d48011a659938 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 22:51:19 -0400 Subject: [PATCH 396/702] MNT: Restore doc/test extras --- .github/workflows/stable.yml | 2 +- pyproject.toml | 20 +++++++++++++++++--- tox.ini | 25 ++++++------------------- 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index e3c9e3b022..8d9697091a 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -95,7 +95,7 @@ jobs: run: pip install archive/nibabel-archive.tgz - run: python -c 'import nibabel; print(nibabel.__version__)' - name: Install minimum test dependencies - run: pip install pytest pytest-doctestplus pytest-xdist + run: pip install nibabel[test] - name: Run tests run: pytest --doctest-modules --doctest-plus -v --pyargs nibabel -n auto diff --git a/pyproject.toml b/pyproject.toml index 7ae7dbda1f..beb81fb0d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,12 +56,26 @@ dicomfs = ["nibabel[dicom]", "pillow"] minc2 = ["h5py"] spm = ["scipy"] zstd = ["pyzstd >= 0.14.3"] -# Dev dependencies: Move to tox, keep aliases to avoid breaking workflows +# For doc and test, make easy to use outside of tox +# tox should use these with extras instead of duplicating +doc = [ + "sphinx", + "matplotlib>=1.5.3", + "numpydoc", + "texext", + "tomli; python_version < '3.11'", +] +test = [ + "pytest", + "pytest-doctestplus", + "pytest-cov", + "pytest-httpserver", + "pytest-xdist", +] +# Remaining: Simpler to centralize in tox dev = ["tox"] -doc = ["tox"] doctest = ["tox"] style = ["tox"] -test = ["tox"] typing = ["tox"] [tool.hatch.build.targets.sdist] diff --git a/tox.ini b/tox.ini index 71c0015639..bd6a5516bb 100644 --- a/tox.ini +++ b/tox.ini @@ -50,12 +50,8 @@ pass_env = USER LNAME USERNAME 
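+# Unpinned test dependencies now come from the package's own 'test' extra,
+# so only the pinned minimum and optional dependencies are listed in deps.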
+extras = test deps = - pytest - pytest-doctestplus - pytest-cov - pytest-httpserver - pytest-xdist # NEP29/SPEC0 + 1yr: Test on minor release series within the last 3 years # We're extending this to all optional dependencies # This only affects the range that we test on; numpy is the only non-optional @@ -98,6 +94,7 @@ commands = description = "Install and verify imports succeed" labels = test deps = +extras = install_command = python -I -m pip install {opts} {packages} commands = python -c "import nibabel; print(nibabel.__version__)" @@ -106,12 +103,7 @@ commands = description = Typical pytest invocation with coverage labels = docs allowlist_externals = make -deps = - sphinx - matplotlib>=1.5.3 - numpydoc - texext - tomli; python_version < "3.11" +extras = doc commands = make -C doc html @@ -119,14 +111,9 @@ commands = description = Typical pytest invocation with coverage labels = docs allowlist_externals = make -depends = docs -deps = - sphinx - pytest - matplotlib>=1.5.3 - numpydoc - texext - tomli; python_version < "3.11" +extras = + doc + test commands = make -C doc doctest From ff8cd5e9c2f6f8a3ca4e8780f64e7a789857f427 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 22:57:40 -0400 Subject: [PATCH 397/702] CI: Fix expr syntax --- .github/workflows/stable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index 8d9697091a..cdeb702a93 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -137,7 +137,7 @@ jobs: env: DEPENDS: ${{ matrix.dependencies }} - ARCH: ${{ ! contains(['none', 'min'], matrix.dependencies) && matrix.architecture }} + ARCH: ${{ !contains(fromJSON('["none", "min"]'), matrix.dependencies) && matrix.architecture }} steps: - uses: actions/checkout@v3 From 5f6c8384c51cafeb3caef1b798226736e6c840c5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 22:59:41 -0400 Subject: [PATCH 398/702] MNT: scipy unavailable for some x86 Pythons --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index bd6a5516bb..8a74fa941e 100644 --- a/tox.ini +++ b/tox.ini @@ -67,7 +67,7 @@ deps = # Numpy 2.0 is a major breaking release; we cannot put much effort into # supporting until it's at least RC stable pre: numpy <2.0.dev0 - full,pre: scipy >=1.6 + x64,pre: scipy >=1.6 full,pre: matplotlib >=3.4 full,pre: pillow >=8.1 full,pre: indexed_gzip >=1.4 From 7d796a82e5a9792fee5f3bd2d55eb179d45f3b35 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 23:14:14 -0400 Subject: [PATCH 399/702] TOX: Update environment list to match CI targets --- tox.ini | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index 8a74fa941e..3c6dcfc4e5 100644 --- a/tox.ini +++ b/tox.ini @@ -2,9 +2,15 @@ requires = tox>=4 envlist = - py38-{none,min,full} - py3{9,10}-full - py3{11,12}-{none,pre,full} + # No preinstallations + py3{8,9,10,11,12}-none + # Minimum Python + py38-{min,full} + # x86 support range + py3{9,10,11}-{full,pre}-{x86,x64} + py3{9,10,11}-pre-{x86,x64} + # x64-only range + py312-{full,pre}-x64 doctest style typecheck @@ -27,6 +33,7 @@ DEPENDS = ARCH = x64: x64 + x86: x86 CHECK = build: build From dc611df7eecdd728de6e1fec2117f0c94971d73f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 23:27:44 -0400 Subject: [PATCH 400/702] TOX: h5py is not unique, handle scipy likewise --- tox.ini | 13 +++++-------- 1 file changed, 5 insertions(+), 8 
deletions(-) diff --git a/tox.ini b/tox.ini index 3c6dcfc4e5..9e379c8534 100644 --- a/tox.ini +++ b/tox.ini @@ -68,14 +68,18 @@ deps = min: importlib_resources ==1.3; python_version < '3.9' min: scipy ==1.6 min: matplotlib ==3.4 + min: h5py ==2.10 min: pillow ==8.1 min: indexed_gzip ==1.4 min: pyzstd ==0.14.3 # Numpy 2.0 is a major breaking release; we cannot put much effort into # supporting until it's at least RC stable pre: numpy <2.0.dev0 - x64,pre: scipy >=1.6 + # Scipy stopped producing win32 wheels at py310 + py3{8,9}-full-x86,x64: scipy >=1.6 full,pre: matplotlib >=3.4 + # h5py stopped producing win32 wheels at py310, has yet to produce py312 wheels + py3{8,9}-full-x86,py3{8,9,10,11}-x64: h5py >=2.10 full,pre: pillow >=8.1 full,pre: indexed_gzip >=1.4 full,pre: pyzstd >=0.14.3 @@ -83,13 +87,6 @@ deps = full,pre: pydicom >=2.1 # pydicom master seems to be breaking things # pre: pydicom @ git+https://github.com/pydicom/pydicom.git@main - # h5py is a pain. They dropped win32 wheels at 3.0, which only supports - # thru py39. Add a special -x64 environment to limit tests to x64. We - # will exclude this environment for none/min in GitHub actions. - min: h5py ==2.10 - x64: h5py >=2.10 - # h5py missing 3.12 wheels, so disable from pre for now - # pre: h5py >=2.10 commands = pytest --doctest-modules --doctest-plus \ From 2fa1daaf1bcd611c4af8d9a936117281e22bbe0a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 23:42:03 -0400 Subject: [PATCH 401/702] TOX: Fix h5py range, avoid indexed_gzip on 3.12 --- tox.ini | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index 9e379c8534..c06d480561 100644 --- a/tox.ini +++ b/tox.ini @@ -78,10 +78,11 @@ deps = # Scipy stopped producing win32 wheels at py310 py3{8,9}-full-x86,x64: scipy >=1.6 full,pre: matplotlib >=3.4 - # h5py stopped producing win32 wheels at py310, has yet to produce py312 wheels - py3{8,9}-full-x86,py3{8,9,10,11}-x64: h5py >=2.10 + # h5py stopped producing win32 wheels at py39, has yet to produce py312 wheels + py38-full-x86,py3{8,9,10,11}-x64: h5py >=2.10 full,pre: pillow >=8.1 - full,pre: indexed_gzip >=1.4 + # indexed_gzip missing py312 wheels + py3{8,9,10,11}-{full,pre}: indexed_gzip >=1.4 full,pre: pyzstd >=0.14.3 min: pydicom ==2.1 full,pre: pydicom >=2.1 From bac556d2e94ee8c04c424c3fd5e9ea882ee1e731 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Oct 2023 23:59:12 -0400 Subject: [PATCH 402/702] CI: Pending wheels are covered by tox.ini --- .github/workflows/stable.yml | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index cdeb702a93..e8199cbaf9 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -106,7 +106,7 @@ jobs: fail-fast: false matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: [3.8, 3.9, "3.10", "3.11"] # Waiting on H5Py: , "3.12"] + python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] architecture: ['x64', 'x86'] dependencies: ['full'] include: @@ -118,15 +118,6 @@ jobs: - os: ubuntu-latest python-version: 3.8 dependencies: 'min' - - os: ubuntu-latest - python-version: 3.12 - dependencies: 'none' - - os: windows-latest - python-version: 3.12 - dependencies: 'none' - - os: macos-latest - python-version: 3.12 - dependencies: 'none' exclude: - os: ubuntu-latest architecture: x86 From bd498d4d64de792d8cca029c1588f834671ad97b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 8 Oct 2023 00:06:50 
-0400 Subject: [PATCH 403/702] DOC: Improve tox.ini documentation --- tox.ini | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/tox.ini b/tox.ini index c06d480561..7bc1059d73 100644 --- a/tox.ini +++ b/tox.ini @@ -1,3 +1,7 @@ +# This file encodes a lot of our intended support range, as well as some +# details about dependency availability. +# +# The majority of the information is contained in tox.envlist and testenv.deps. [tox] requires = tox>=4 @@ -16,6 +20,7 @@ envlist = typecheck skip_missing_interpreters = true +# Configuration that allows us to split tests across GitHub runners effectively [gh-actions] python = 3.8: py38 @@ -35,14 +40,8 @@ ARCH = x64: x64 x86: x86 -CHECK = - build: build - doctest: doctest - style: style - typecheck: typecheck - [testenv] -description = Typical pytest invocation with coverage +description = Pytest with coverage labels = test install_command = python -I -m pip install \ @@ -96,7 +95,7 @@ commands = --pyargs nibabel {posargs:-n auto} [testenv:install] -description = "Install and verify imports succeed" +description = Install and verify import succeeds labels = test deps = extras = @@ -105,7 +104,7 @@ commands = python -c "import nibabel; print(nibabel.__version__)" [testenv:docs] -description = Typical pytest invocation with coverage +description = Build documentation site labels = docs allowlist_externals = make extras = doc @@ -113,7 +112,7 @@ commands = make -C doc html [testenv:doctest] -description = Typical pytest invocation with coverage +description = Run doctests in documentation site labels = docs allowlist_externals = make extras = From 74b99652e700ee309b5fe30db572e2457baf0be3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Oct 2023 10:53:13 -0400 Subject: [PATCH 404/702] CI: Timeout tox and dump debug information if we go >20 minutes --- .github/workflows/stable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml index e8199cbaf9..d7cac1055b 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/stable.yml @@ -150,7 +150,7 @@ jobs: - name: Show tox config run: pipx run tox c - name: Run tox - run: tox + run: tox --exit-and-dump-after 1200 - uses: codecov/codecov-action@v3 if: ${{ always() }} with: From 7f28eb30482c254ad57309e33526bcd765e59a3a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Oct 2023 10:53:57 -0400 Subject: [PATCH 405/702] TOX: Use h5py wheels for all full/pre-x64 builds --- tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 7bc1059d73..64584b1d82 100644 --- a/tox.ini +++ b/tox.ini @@ -77,8 +77,8 @@ deps = # Scipy stopped producing win32 wheels at py310 py3{8,9}-full-x86,x64: scipy >=1.6 full,pre: matplotlib >=3.4 - # h5py stopped producing win32 wheels at py39, has yet to produce py312 wheels - py38-full-x86,py3{8,9,10,11}-x64: h5py >=2.10 + # h5py stopped producing win32 wheels at py39 + py38-full-x86,x64: h5py >=2.10 full,pre: pillow >=8.1 # indexed_gzip missing py312 wheels py3{8,9,10,11}-{full,pre}: indexed_gzip >=1.4 From 648a2252c663b10fa4516e3e4c54b11fad332ff7 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 10 Oct 2023 07:48:16 -0400 Subject: [PATCH 406/702] CI: Consolidate stable and pre-release tests --- .github/workflows/pre-release.yml | 81 ---------------------- .github/workflows/{stable.yml => test.yml} | 9 +-- 2 files changed, 5 insertions(+), 85 deletions(-) delete mode 100644 
.github/workflows/pre-release.yml rename .github/workflows/{stable.yml => test.yml} (97%) diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml deleted file mode 100644 index ba596979da..0000000000 --- a/.github/workflows/pre-release.yml +++ /dev/null @@ -1,81 +0,0 @@ -name: Pre-release checks - -# This file tests against pre-release wheels for dependencies - -on: - push: - branches: - - master - - maint/* - pull_request: - branches: - - master - - maint/* - schedule: - - cron: '0 0 * * *' - -defaults: - run: - shell: bash - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -permissions: - contents: read - -jobs: - pre-release: - # Check pre-releases of dependencies on stable Python - runs-on: ${{ matrix.os }} - continue-on-error: true - strategy: - matrix: - os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: ["3.9", "3.10", "3.11", "3.12"] - architecture: ['x64', 'x86'] - dependencies: ['pre'] - exclude: - - os: ubuntu-latest - architecture: x86 - - os: macos-latest - architecture: x86 - - python-version: '3.12' - architecture: x86 - - env: - DEPENDS: ${{ matrix.dependencies }} - ARCH: ${{ matrix.architecture }} - - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - fetch-depth: 0 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - architecture: ${{ matrix.architecture }} - allow-prereleases: true - - name: Display Python version - run: python -c "import sys; print(sys.version)" - - name: Install tox - run: | - python -m pip install --upgrade pip - python -m pip install tox tox-gh-actions - - name: Show tox config - run: pipx run tox c - - name: Run tox - run: tox - - uses: codecov/codecov-action@v3 - if: ${{ always() }} - with: - files: cov.xml - - name: Upload pytest test results - uses: actions/upload-artifact@v3 - with: - name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} - path: test-results.xml - if: ${{ always() }} diff --git a/.github/workflows/stable.yml b/.github/workflows/test.yml similarity index 97% rename from .github/workflows/stable.yml rename to .github/workflows/test.yml index d7cac1055b..d7c9a4cb9b 100644 --- a/.github/workflows/stable.yml +++ b/.github/workflows/test.yml @@ -1,4 +1,4 @@ -name: Stable tests +name: Build and test # This file tests the claimed support range of NiBabel including # @@ -99,16 +99,17 @@ jobs: - name: Run tests run: pytest --doctest-modules --doctest-plus -v --pyargs nibabel -n auto - stable: + test: # Check each OS, all supported Python, minimum versions and latest releases runs-on: ${{ matrix.os }} + continue-on-error: ${{ matrix.dependencies == 'pre' }} strategy: fail-fast: false matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] architecture: ['x64', 'x86'] - dependencies: ['full'] + dependencies: ['full', 'pre'] include: # Basic dependencies only - os: ubuntu-latest @@ -165,7 +166,7 @@ jobs: publish: runs-on: ubuntu-latest environment: "Package deployment" - needs: [stable, test-package] + needs: [test, test-package] if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: - uses: actions/download-artifact@v3 From 18506874e14492bb3ed4a7f54c33f94e1940f006 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 10 Oct 2023 07:52:31 -0400 Subject: [PATCH 407/702] CI: Add verbosity to tox --- .github/workflows/test.yml | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d7c9a4cb9b..4ffaccbdec 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -151,7 +151,7 @@ jobs: - name: Show tox config run: pipx run tox c - name: Run tox - run: tox --exit-and-dump-after 1200 + run: tox -v --exit-and-dump-after 1200 - uses: codecov/codecov-action@v3 if: ${{ always() }} with: From 194cdb4452d6c995baaa46b234573601cd1448bd Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 10 Oct 2023 08:02:32 -0400 Subject: [PATCH 408/702] CI: Remove unnecessary pipx call --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4ffaccbdec..d0e840f1be 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -149,7 +149,7 @@ jobs: python -m pip install --upgrade pip python -m pip install tox tox-gh-actions - name: Show tox config - run: pipx run tox c + run: tox c - name: Run tox run: tox -v --exit-and-dump-after 1200 - uses: codecov/codecov-action@v3 From f6a2c9f629107bfe0576194dddaf4dd1c54ce9ff Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 10 Oct 2023 08:27:45 -0400 Subject: [PATCH 409/702] TOX: Pillow is hard to build on CI --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 64584b1d82..55afd815da 100644 --- a/tox.ini +++ b/tox.ini @@ -45,7 +45,7 @@ description = Pytest with coverage labels = test install_command = python -I -m pip install \ - --only-binary numpy,scipy,h5py + --only-binary numpy,scipy,h5py,pillow \ --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ {opts} {packages} pip_pre = From 15fe94ef3a9d94a80bf0147e9e01480b2c2e9563 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 10 Oct 2023 08:54:06 -0400 Subject: [PATCH 410/702] TOX: Match matplotlib conditions to scipy --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 55afd815da..ee0e568c7f 100644 --- a/tox.ini +++ b/tox.ini @@ -76,7 +76,8 @@ deps = pre: numpy <2.0.dev0 # Scipy stopped producing win32 wheels at py310 py3{8,9}-full-x86,x64: scipy >=1.6 - full,pre: matplotlib >=3.4 + # Matplotlib depends on scipy, so cannot be built for py310 on x86 + py3{8,9}-full-x86,x64: matplotlib >=3.4 # h5py stopped producing win32 wheels at py39 py38-full-x86,x64: h5py >=2.10 full,pre: pillow >=8.1 From f4fefd5fdef1f9fb953455c513b60e0318993151 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 10 Oct 2023 09:01:52 -0400 Subject: [PATCH 411/702] CI: Add install to none and full tests --- tox.ini | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index ee0e568c7f..c9ba983ac2 100644 --- a/tox.ini +++ b/tox.ini @@ -15,6 +15,7 @@ envlist = py3{9,10,11}-pre-{x86,x64} # x64-only range py312-{full,pre}-x64 + install doctest style typecheck @@ -31,9 +32,9 @@ python = [gh-actions:env] DEPENDS = - none: none + none: none, install pre: pre - full: full + full: full, install min: min ARCH = From dadd3f5df55dbdd00319c66ef2ad0f66e8a54c35 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 10 Oct 2023 09:03:30 -0400 Subject: [PATCH 412/702] MNT: Ignore coverage/testing summary outputs --- .gitignore | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 4e9cf81029..e413527d13 100644 --- a/.gitignore +++ b/.gitignore @@ -48,7 +48,9 @@ dist/ 
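+# coverage and test summaries produced by the tox runs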
 *.egg-info/
 .shelf
 .tox/
-.coverage
+.coverage*
+cov.xml
+test-results.xml
 .ropeproject/
 htmlcov/
 .*_cache/

From c7ef0d4a8c690630419a04dbb962d3dd4c809d36 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 10 Oct 2023 23:20:51 -0400
Subject: [PATCH 413/702] DOC: Add docs on using tox and pre-commit

---
 doc/source/devel/devguide.rst | 48 +++++++++++++++++++++++++++++++++++
 nibabel/info.py               | 21 +++++++++++++++
 2 files changed, 69 insertions(+)

diff --git a/doc/source/devel/devguide.rst b/doc/source/devel/devguide.rst
index 2747564dbf..bce5b64aaa 100644
--- a/doc/source/devel/devguide.rst
+++ b/doc/source/devel/devguide.rst
@@ -95,6 +95,50 @@ advise that you enable merge summaries within git:
 
 See :ref:`configure-git` for more detail.
 
+Pre-commit hooks
+----------------
+
+NiBabel uses pre-commit_ to help committers validate their changes
+before committing. To enable these, you can use pipx_::
+
+    pipx run pre-commit install
+
+Or install and run::
+
+    python -m pip install pre-commit
+    pre-commit install
+
+
+Testing
+=======
+
+NiBabel uses tox_ to organize our testing and development workflows.
+tox runs tests in isolated environments that we specify,
+ensuring that we are able to test across many different environments,
+and those environments do not depend on our local configurations.
+
+If you have the pipx_ tool installed, then you may simply::
+
+    pipx run tox
+
+Alternatively, you can install tox and run it::
+
+    python -m pip install tox
+    tox
+
+This will run the tests in several configurations, with multiple sets of
+optional dependencies.
+If you have multiple versions of Python installed in your path, it will
+repeat the process for each version of Python in our supported range.
+It may be useful to pick a particular version for rapid development::
+
+    tox -e py311-full-x64
+
+This will run the environment using the Python 3.11 interpreter, with the
+full set of optional dependencies that are available for 64-bit
+interpreters. If you are using 32-bit Python, replace ``-x64`` with ``-x86``.
+
+
 Changelog
 =========
 
@@ -123,3 +167,7 @@ Community guidelines
 Please see `our community guidelines
 <https://github.com/nipy/nibabel/blob/master/.github/CODE_OF_CONDUCT.md>`_.
 Other projects call these guidelines the "code of conduct".
+
+.. _tox: https://tox.wiki
+.. _pipx: https://pypa.github.io/pipx/
+.. _precommit: https://pre-commit.com/
diff --git a/nibabel/info.py b/nibabel/info.py
index 063978444c..33d1b0aa0d 100644
--- a/nibabel/info.py
+++ b/nibabel/info.py
@@ -62,6 +62,27 @@
 .. _release archive: https://github.com/nipy/NiBabel/releases
 .. _development changelog: https://nipy.org/nibabel/changelog.html
 
+Testing
+=======
+
+During development, we recommend using tox_ to run nibabel tests::
+
+    git clone https://github.com/nipy/nibabel.git
+    cd nibabel
+    tox
+
+To test an installed version of nibabel, install the test dependencies
+and run pytest_::
+
+    pip install nibabel[test]
+    pytest --pyargs nibabel
+
+For more inforation, consult the `developer guidelines`_.
+
+.. _tox: https://tox.wiki
+.. _pytest: https://docs.pytest.org
+.. 
_developer guidelines: https://nipy.org/nibabel/devel/devguide.html + Mailing List ============ From f244c4b8629382194adf194b8fbd0f888dd87e9c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 10 Oct 2023 23:21:08 -0400 Subject: [PATCH 414/702] TOX: Add NIPY_EXTRA_TESTS to pass_env --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index c9ba983ac2..51819fbb64 100644 --- a/tox.ini +++ b/tox.ini @@ -51,12 +51,14 @@ install_command = {opts} {packages} pip_pre = pre: true -# getpass.getuser() sources for Windows: pass_env = + # getpass.getuser() sources for Windows: LOGNAME USER LNAME USERNAME + # Environment variables we check for + NIPY_EXTRA_TESTS extras = test deps = # NEP29/SPEC0 + 1yr: Test on minor release series within the last 3 years From 873d5bfdcc2415c5730fe934902c4deba8d0807d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 11 Oct 2023 07:42:31 -0400 Subject: [PATCH 415/702] CI: Quote python versions for consistency --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d0e840f1be..04715c7673 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -107,7 +107,7 @@ jobs: fail-fast: false matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] - python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] architecture: ['x64', 'x86'] dependencies: ['full', 'pre'] include: From 6e86852e3a0cd34c26f5ae13fbc7f1abc9a9bb28 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 11 Oct 2023 08:03:27 -0400 Subject: [PATCH 416/702] TOX: Update install_command overrides with x86/x64/pre-specific overrides --- tox.ini | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index 51819fbb64..dd8e1650b9 100644 --- a/tox.ini +++ b/tox.ini @@ -45,9 +45,10 @@ ARCH = description = Pytest with coverage labels = test install_command = - python -I -m pip install \ - --only-binary numpy,scipy,h5py,pillow \ - --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ + python -I -m pip install -v \ + x64: --only-binary numpy,scipy,h5py,pillow \ + x86: --only-binary numpy,scipy,h5py,pillow,matplotlib \ + pre: --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ {opts} {packages} pip_pre = pre: true From 5a8e303f3cdc3ef31e08a8fa87a7c6503a4bfaac Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 11 Oct 2023 08:45:10 -0400 Subject: [PATCH 417/702] CI: Merge checks into test workflow --- .github/workflows/misc.yml | 40 -------------------------------------- .github/workflows/test.yml | 22 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 40 deletions(-) delete mode 100644 .github/workflows/misc.yml diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml deleted file mode 100644 index 616e246350..0000000000 --- a/.github/workflows/misc.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Miscellaneous checks - -# This file runs doctests on the documentation and style checks - -on: - push: - branches: - - master - - maint/* - pull_request: - branches: - - master - - maint/* - -defaults: - run: - shell: bash - -jobs: - misc: - runs-on: 'ubuntu-latest' - continue-on-error: true - strategy: - matrix: - check: ['style', 'doctest', 'typecheck'] - - steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: 
actions/setup-python@v4 - with: - python-version: 3 - - name: Display Python version - run: python -c "import sys; print(sys.version)" - - name: Show tox config - run: pipx run tox c - - name: Show tox config (this call) - run: pipx run tox c -e ${{ matrix.check }} - - name: Run check - run: pipx run tox -e ${{ matrix.check }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 04715c7673..48ab9b7ff1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -163,6 +163,28 @@ jobs: path: test-results.xml if: ${{ always() }} + checks: + runs-on: 'ubuntu-latest' + continue-on-error: true + strategy: + matrix: + check: ['style', 'doctest', 'typecheck'] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: 3 + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Show tox config + run: pipx run tox c + - name: Show tox config (this call) + run: pipx run tox c -e ${{ matrix.check }} + - name: Run check + run: pipx run tox -e ${{ matrix.check }} + publish: runs-on: ubuntu-latest environment: "Package deployment" From a0dc67e6bd698c3313d462aa7b5391714b9f20aa Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 11 Oct 2023 08:57:59 -0400 Subject: [PATCH 418/702] CI: Update action version --- .github/workflows/test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 48ab9b7ff1..9b12727bda 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -132,7 +132,7 @@ jobs: ARCH: ${{ !contains(fromJSON('["none", "min"]'), matrix.dependencies) && matrix.architecture }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 @@ -171,7 +171,7 @@ jobs: check: ['style', 'doctest', 'typecheck'] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: From 466e929efdc84b93e0998dba2e757ee616f521e2 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 15 Oct 2023 15:44:19 +0200 Subject: [PATCH 419/702] DOC: Fix typos found by codespell --- Changelog | 2 +- doc/source/devel/biaps/biap_0006.rst | 2 +- nibabel/pointset.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Changelog b/Changelog index e5bbac91ae..cb30decc64 100644 --- a/Changelog +++ b/Changelog @@ -1246,7 +1246,7 @@ Special thanks to Chris Burns, Jarrod Millman and Yaroslav Halchenko. * Very preliminary, limited and highly experimental DICOM reading support (MB, Ian Nimmo Smith). * Some functions (:py:mod:`nibabel.funcs`) for basic image shape changes, including - the ability to transform to the image with data closest to the cononical + the ability to transform to the image with data closest to the canonical image orientation (first axis left-to-right, second back-to-front, third down-to-up) (MB, Jonathan Taylor) * Gifti format read and write support (preliminary) (Stephen Gerhard) diff --git a/doc/source/devel/biaps/biap_0006.rst b/doc/source/devel/biaps/biap_0006.rst index 16a3a4833f..effe3d343c 100644 --- a/doc/source/devel/biaps/biap_0006.rst +++ b/doc/source/devel/biaps/biap_0006.rst @@ -202,7 +202,7 @@ here is the definition of a "multi-frame image":: 3.8.9 Multi-frame image: Image that contains multiple two-dimensional pixel planes. 
-From `PS 3.3 of the 2011 DICOM standrd +From `PS 3.3 of the 2011 DICOM standard `_. ********************************** diff --git a/nibabel/pointset.py b/nibabel/pointset.py index b40449801d..58fca148a8 100644 --- a/nibabel/pointset.py +++ b/nibabel/pointset.py @@ -126,7 +126,7 @@ def get_coords(self, *, as_homogeneous: bool = False): ---------- as_homogeneous : :class:`bool` Return homogeneous coordinates if ``True``, or Cartesian - coordiantes if ``False``. + coordinates if ``False``. name : :class:`str` Select a particular coordinate system if more than one may exist. From 2596179e3f666bbc15ec37f47d418b93c691b9f8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 18 Oct 2023 08:36:18 -0400 Subject: [PATCH 420/702] TOX: Add spellcheck environment --- pyproject.toml | 4 ++++ tox.ini | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index beb81fb0d4..50905dff56 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -129,3 +129,7 @@ python_version = "3.11" exclude = [ "/tests", ] + +[tool.codespell] +skip = "*/data/*,./nibabel-data" +ignore-words-list = "ans,te,ue,ist,nin,nd,ccompiler,ser" diff --git a/tox.ini b/tox.ini index dd8e1650b9..b5328d081a 100644 --- a/tox.ini +++ b/tox.ini @@ -150,6 +150,15 @@ commands = blue nibabel isort nibabel +[testenv:spellcheck] +description = Check spelling +labels = check +deps = + codespell[toml] +skip_install = true +commands = + codespell . {posargs} + [testenv:typecheck] description = Check type consistency labels = check From 5098133c9d12fcc4b686d18a3c2f0fd4575fd006 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 18 Oct 2023 08:44:59 -0400 Subject: [PATCH 421/702] TEST: Unroll hash check, do not run unnecessarily --- nibabel/freesurfer/tests/test_io.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 2406679d73..183a67ed2e 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -7,6 +7,7 @@ import warnings from os.path import isdir from os.path import join as pjoin +from pathlib import Path import numpy as np import pytest @@ -46,14 +47,6 @@ ) -def _hash_file_content(fname): - hasher = hashlib.md5() - with open(fname, 'rb') as afile: - buf = afile.read() - hasher.update(buf) - return hasher.hexdigest() - - @freesurfer_test def test_geometry(): """Test IO of .surf""" @@ -179,7 +172,6 @@ def test_annot(): annots = ['aparc', 'aparc.a2005s'] for a in annots: annot_path = pjoin(data_path, 'label', f'lh.{a}.annot') - hash_ = _hash_file_content(annot_path) labels, ctab, names = read_annot(annot_path) assert labels.shape == (163842,) @@ -190,9 +182,10 @@ def test_annot(): labels_orig, _, _ = read_annot(annot_path, orig_ids=True) np.testing.assert_array_equal(labels == -1, labels_orig == 0) # Handle different version of fsaverage - if hash_ == 'bf0b488994657435cdddac5f107d21e8': + content_hash = hashlib.md5(Path(annot_path).read_bytes()).hexdigest() + if content_hash == 'bf0b488994657435cdddac5f107d21e8': assert np.sum(labels_orig == 0) == 13887 - elif hash_ == 'd4f5b7cbc2ed363ac6fcf89e19353504': + elif content_hash == 'd4f5b7cbc2ed363ac6fcf89e19353504': assert np.sum(labels_orig == 1639705) == 13327 else: raise RuntimeError( From d11cbe53d8fc0a8c2085e638f01d704ac20dc12d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 18 Oct 2023 08:48:26 -0400 Subject: [PATCH 422/702] FIX: Apply codespell suggestions --- 
doc/source/gitwash/development_workflow.rst | 2 +- nibabel/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/gitwash/development_workflow.rst b/doc/source/gitwash/development_workflow.rst index 7c117cfcce..696a939ed8 100644 --- a/doc/source/gitwash/development_workflow.rst +++ b/doc/source/gitwash/development_workflow.rst @@ -334,7 +334,7 @@ Rewriting commit history Do this only for your own feature branches. -There's an embarassing typo in a commit you made? Or perhaps the you +There's an embarrassing typo in a commit you made? Or perhaps the you made several false starts you would like the posterity not to see. This can be done via *interactive rebasing*. diff --git a/nibabel/info.py b/nibabel/info.py index 33d1b0aa0d..a608932fa8 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -77,7 +77,7 @@ pip install nibabel[test] pytest --pyargs nibabel -For more inforation, consult the `developer guidelines`_. +For more information, consult the `developer guidelines`_. .. _tox: https://tox.wiki .. _pytest: https://docs.pytest.org From a55d178d5ff092369e30d61832bad99427c74bbb Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 18 Oct 2023 08:48:58 -0400 Subject: [PATCH 423/702] CI: Add spellcheck job --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9b12727bda..7eb1730daa 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -168,7 +168,7 @@ jobs: continue-on-error: true strategy: matrix: - check: ['style', 'doctest', 'typecheck'] + check: ['style', 'doctest', 'typecheck', 'spellcheck'] steps: - uses: actions/checkout@v4 From 07100eab8c4cf4b3505019566e37e62e4b826c41 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 18 Oct 2023 08:50:33 -0400 Subject: [PATCH 424/702] MNT: Add codespell to pre-commit --- .pre-commit-config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 137aa49462..2b620a6de3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,3 +40,9 @@ repos: - importlib_resources args: ["nibabel"] pass_filenames: false + - repo: https://github.com/codespell-project/codespell + rev: v2.2.6 + hooks: + - id: codespell + additional_dependencies: + - tomli From 12b99f9c986e8e4e0966c19e159affc8b8bf5fcb Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 18 Oct 2023 09:12:27 -0400 Subject: [PATCH 425/702] DOC: Add docs for using and applying style/linting tools --- doc/source/devel/devguide.rst | 68 +++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 15 deletions(-) diff --git a/doc/source/devel/devguide.rst b/doc/source/devel/devguide.rst index bce5b64aaa..8748270f11 100644 --- a/doc/source/devel/devguide.rst +++ b/doc/source/devel/devguide.rst @@ -95,20 +95,6 @@ advise that you enable merge summaries within git: See :ref:`configure-git` for more detail. -Pre-commit hooks ----------------- - -NiBabel uses pre-commit_ to help committers validate their changes -before committing. To enable these, you can use pipx_:: - - pipx run pre-commit install - -Or install and run:: - - python -m pip install pre-commit - pre-commit install - - Testing ======= @@ -139,6 +125,55 @@ full set of optional dependencies that are available for 64-bit interpreters. If you are using 32-bit Python, replace ``-x64`` with ``-x86``. 
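+Anything after a ``--`` separator is passed through to pytest by the test
+environments, which is handy when iterating on a single failure. The
+keyword below is only a placeholder; substitute any pytest ``-k``
+expression::
+
+    tox -e py311-full-x64 -- -k affines
+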
+Style guide +=========== + +To ensure code consistency and readability, NiBabel has adopted the following +tools: + +* blue_ - An auto-formatter that aims to reduce diffs to relevant lines +* isort_ - An import sorter that groups stdlib, third-party and local imports. +* flake8_ - A style checker that can catch (but generally not fix) common + errors in code. +* codespell_ - A spell checker targeted at source code. +* pre-commit_ - A pre-commit hook manager that runs the above and various + other checks/fixes. + +While some amount of personal preference is involved in selecting and +configuring auto-formatters, their value lies in largely eliminating the +need to think or argue about style. +With pre-commit turned on, you can write in the style that works for you, +and the NiBabel style will be adopted prior to the commit. + +To apply our style checks uniformly, simply run:: + + tox -e style,spellcheck + +To fix any issues found:: + + tox -e style-fix + tox -e spellcheck -- -w + +Occasionally, codespell has a false positive. To ignore the suggestion, add +the intended word to ``tool.codespell.ignore-words-list`` in ``pyproject.toml``. +However, the ignore list is a blunt instrument and could cause a legitimate +misspelling to be missed. Consider choosing a word that does not trigger +codespell before adding it to the ignore list. + +Pre-commit hooks +---------------- + +NiBabel uses pre-commit_ to help committers validate their changes +before committing. To enable these, you can use pipx_:: + + pipx run pre-commit install + +Or install and run:: + + python -m pip install pre-commit + pre-commit install + + Changelog ========= @@ -168,6 +203,9 @@ Please see `our community guidelines `_. Other projects call these guidelines the "code of conduct". -.. _tox: https://tox.wiki +.. _blue: https://blue.readthedocs.io/ +.. _codespell: https://github.com/codespell-project/codespell +.. _flake8: https://flake8.pycqa.org/ .. _pipx: https://pypa.github.io/pipx/ .. _precommit: https://pre-commit.com/ +.. 
_tox: https://tox.wiki/ From 848aca2a7bc9eb18a0bd1dacd93b3ad12b6b1731 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 18 Oct 2023 10:27:58 -0400 Subject: [PATCH 426/702] MNT: Add py312-dev-x64 environment --- tox.ini | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index dd8e1650b9..bf3d4886dd 100644 --- a/tox.ini +++ b/tox.ini @@ -15,6 +15,8 @@ envlist = py3{9,10,11}-pre-{x86,x64} # x64-only range py312-{full,pre}-x64 + # Special environment for numpy 2.0-dev testing + py312-dev-x64 install doctest style @@ -34,6 +36,7 @@ python = DEPENDS = none: none, install pre: pre + dev: dev full: full, install min: min @@ -48,10 +51,10 @@ install_command = python -I -m pip install -v \ x64: --only-binary numpy,scipy,h5py,pillow \ x86: --only-binary numpy,scipy,h5py,pillow,matplotlib \ - pre: --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ + pre,dev: --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ {opts} {packages} pip_pre = - pre: true + pre,dev: true pass_env = # getpass.getuser() sources for Windows: LOGNAME @@ -78,6 +81,7 @@ deps = # Numpy 2.0 is a major breaking release; we cannot put much effort into # supporting until it's at least RC stable pre: numpy <2.0.dev0 + dev: numpy >=2.0.dev0 # Scipy stopped producing win32 wheels at py310 py3{8,9}-full-x86,x64: scipy >=1.6 # Matplotlib depends on scipy, so cannot be built for py310 on x86 @@ -89,7 +93,7 @@ deps = py3{8,9,10,11}-{full,pre}: indexed_gzip >=1.4 full,pre: pyzstd >=0.14.3 min: pydicom ==2.1 - full,pre: pydicom >=2.1 + full,pre,dev: pydicom >=2.1 # pydicom master seems to be breaking things # pre: pydicom @ git+https://github.com/pydicom/pydicom.git@main From 98489b589cf01469227f3df607125da31fb3895b Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 18 Oct 2023 11:01:07 -0400 Subject: [PATCH 427/702] CI: Test NumPy 2.0 --- .github/workflows/test.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9b12727bda..9c4f08eb39 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -119,6 +119,10 @@ jobs: - os: ubuntu-latest python-version: 3.8 dependencies: 'min' + # NumPy 2.0 + - os: ubuntu-latest + python-version: '3.12' + dependencies: 'dev' exclude: - os: ubuntu-latest architecture: x86 From 4815ee5cdc48ece25724a666708c9450a586fc98 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Wed, 18 Oct 2023 19:44:49 +0200 Subject: [PATCH 428/702] fix blue --- nibabel/casting.py | 1 + nibabel/tests/test_analyze.py | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index e56722676a..1ed36ad440 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -62,6 +62,7 @@ class CastingError(Exception): } # fmt: on + def float_to_int(arr, int_type, nan2zero=True, infmax=False): """Convert floating point array `arr` to type `int_type` diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 75c64d4e53..4e024d6e3b 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -51,9 +51,7 @@ def add_duplicate_types(supported_np_types): # Update supported numpy types with named scalar types that map to the same set of dtypes dtypes = {np.dtype(t) for t in supported_np_types} - supported_np_types.update( - scalar for scalar in sctypes_aliases if np.dtype(scalar) in dtypes - ) + supported_np_types.update(scalar for scalar in 
sctypes_aliases if np.dtype(scalar) in dtypes) class TestAnalyzeHeader(tws._TestLabeledWrapStruct): From 49048c258184a714f2266e24917ed15bc59e5305 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Wed, 18 Oct 2023 19:47:55 +0200 Subject: [PATCH 429/702] fix spelling --- doc/source/gitwash/development_workflow.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/gitwash/development_workflow.rst b/doc/source/gitwash/development_workflow.rst index 7c117cfcce..696a939ed8 100644 --- a/doc/source/gitwash/development_workflow.rst +++ b/doc/source/gitwash/development_workflow.rst @@ -334,7 +334,7 @@ Rewriting commit history Do this only for your own feature branches. -There's an embarassing typo in a commit you made? Or perhaps the you +There's an embarrassing typo in a commit you made? Or perhaps the you made several false starts you would like the posterity not to see. This can be done via *interactive rebasing*. From c76fe32b2f259a4389b73dd8e5edf1389776d851 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Wed, 18 Oct 2023 19:49:17 +0200 Subject: [PATCH 430/702] rm unused imports --- nibabel/freesurfer/tests/test_io.py | 3 +-- nibabel/tests/test_image_types.py | 5 ----- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 2406679d73..6d6b9da478 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -4,13 +4,12 @@ import struct import time import unittest -import warnings from os.path import isdir from os.path import join as pjoin import numpy as np import pytest -from numpy.testing import assert_allclose, assert_array_equal +from numpy.testing import assert_allclose from ...fileslice import strided_scalar from ...testing import clear_and_catch_warnings diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index 9fd48ee697..da2f93e21f 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -15,19 +15,14 @@ import numpy as np from .. 
import ( - AnalyzeHeader, - AnalyzeImage, MGHImage, Minc1Image, Minc2Image, - Nifti1Header, Nifti1Image, Nifti1Pair, - Nifti2Header, Nifti2Image, Nifti2Pair, Spm2AnalyzeImage, - Spm99AnalyzeImage, all_image_classes, ) From 079ddc8b0a460f96b6d98880de9681975fb32e5a Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Wed, 18 Oct 2023 20:18:59 +0200 Subject: [PATCH 431/702] try test fix suggested by larsoner --- nibabel/conftest.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nibabel/conftest.py b/nibabel/conftest.py index 1f9ecd09cf..7a369cdac0 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -1,5 +1,14 @@ +import numpy as np import pytest +from packaging.version import parse + # Ignore warning requesting help with nicom with pytest.warns(UserWarning): import nibabel.nicom + + +def pytest_configure(config): + """Configure pytest options.""" + if parse('1.26') <= parse(np.__version__): + np.set_printoptions(legacy='1.25') From 319f23f7f6a2fb4705d534a47edee5bb68819746 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Wed, 18 Oct 2023 20:36:34 +0200 Subject: [PATCH 432/702] try simpler --- nibabel/conftest.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/nibabel/conftest.py b/nibabel/conftest.py index 7a369cdac0..3b2f749b1d 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -1,8 +1,6 @@ import numpy as np import pytest -from packaging.version import parse - # Ignore warning requesting help with nicom with pytest.warns(UserWarning): import nibabel.nicom @@ -10,5 +8,4 @@ def pytest_configure(config): """Configure pytest options.""" - if parse('1.26') <= parse(np.__version__): - np.set_printoptions(legacy='1.25') + np.set_printoptions(legacy=125) From 2c3b43d7c6941f4ed65b675d452ec514e6ca164a Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 25 Oct 2023 13:49:53 -0400 Subject: [PATCH 433/702] FIX: Only need legacy if on 2.0 --- nibabel/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nibabel/conftest.py b/nibabel/conftest.py index 3b2f749b1d..a483b4b6e6 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -8,4 +8,5 @@ def pytest_configure(config): """Configure pytest options.""" - np.set_printoptions(legacy=125) + if int(np.__version__[0]) >= 2: + np.set_printoptions(legacy=125) From 9214846f34e661317a5d76f7d6cd68877cd58504 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 25 Oct 2023 13:55:47 -0400 Subject: [PATCH 434/702] FIX: Cast --- nibabel/freesurfer/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index ec6b474b04..8d8bcd3b7c 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -31,7 +31,7 @@ def _fread3(fobj): n : int A 3 byte int """ - b1, b2, b3 = np.fromfile(fobj, '>u1', 3) + b1, b2, b3 = np.fromfile(fobj, '>u1', 3).astype(int) return (b1 << 16) + (b2 << 8) + b3 From ae0e36e40e7d6fe14c0d3fba1e8818074ce31673 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 25 Oct 2023 13:56:28 -0400 Subject: [PATCH 435/702] FIX: Consistency --- nibabel/freesurfer/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 8d8bcd3b7c..95d4eed0f6 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -31,7 +31,7 @@ def _fread3(fobj): n : int A 3 byte int """ - b1, b2, b3 = np.fromfile(fobj, '>u1', 3).astype(int) + b1, b2, b3 = np.fromfile(fobj, '>u1', 3).astype(np.int64) return (b1 << 16) + (b2 << 8) + b3 
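The two ``_fread3`` fixes above widen the three raw bytes before shifting,
presumably because the values read as ``'>u1'`` are uint8 scalars, and under
NumPy 2.0's NEP 50 promotion rules a Python shift amount adopts that narrow
dtype, so ``b1 << 16`` can wrap instead of promoting. A sketch of the widened
arithmetic, with ``np.frombuffer`` standing in for the file read::

    import numpy as np

    raw = b'\x01\x02\x03'
    b1, b2, b3 = np.frombuffer(raw, dtype='>u1').astype(np.int64)
    # With int64 operands the shifts are exact: 0x010203 == 66051
    assert (b1 << 16) + (b2 << 8) + b3 == 0x010203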
From aca58c3bb50d9f31e06207ac8df3cf6acd516205 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 25 Oct 2023 15:21:30 -0400 Subject: [PATCH 436/702] FIX: Newbyteorder --- nibabel/streamlines/trk.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 04ac56a51d..177536eda1 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -1,4 +1,4 @@ -# Definition of trackvis header structure: ++# Definition of trackvis header structure: # http://www.trackvis.org/docs/?subsect=fileformat import os @@ -577,7 +577,7 @@ def _read_header(fileobj): endianness = swapped_code # Swap byte order - header_rec = header_rec.newbyteorder() + header_rec = header_rec.view(header_rec.dtype.newbyteorder()) if header_rec['hdr_size'] != TrkFile.HEADER_SIZE: msg = ( f"Invalid hdr_size: {header_rec['hdr_size']} " From c60a2349d383d7ccde1529faa130318ff93017fc Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Wed, 25 Oct 2023 22:36:29 +0200 Subject: [PATCH 437/702] fix typo --- nibabel/streamlines/trk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 177536eda1..966b133d1f 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -1,4 +1,4 @@ -+# Definition of trackvis header structure: +# Definition of trackvis header structure: # http://www.trackvis.org/docs/?subsect=fileformat import os From 1a9ebad8cfd64fc4a1707a948e7c9616ffc021dc Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Thu, 26 Oct 2023 13:34:40 +0200 Subject: [PATCH 438/702] fix more stuff --- nibabel/ecat.py | 2 +- nibabel/freesurfer/tests/test_io.py | 6 ++++-- nibabel/nifti1.py | 8 ++++++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 7f477e4a97..1db902d10a 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -923,7 +923,7 @@ def _write_data(self, data, stream, pos, dtype=None, endianness=None): endianness = native_code stream.seek(pos) - make_array_writer(data.newbyteorder(endianness), dtype).to_fileobj(stream) + make_array_writer(data.view(data.dtype.newbyteorder(endianness)), dtype).to_fileobj(stream) def to_file_map(self, file_map=None): """Write ECAT7 image to `file_map` or contained ``self.file_map`` diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 6d6b9da478..8fda72b739 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -111,8 +111,10 @@ def test_geometry(): assert np.array_equal(faces, faces2) # Validate byte ordering - coords_swapped = coords.byteswap().newbyteorder() - faces_swapped = faces.byteswap().newbyteorder() + coords_swapped = coords.byteswap() + coords_swapped = coords_swapped.view(coords_swapped.dtype.newbyteorder()) + faces_swapped = faces.byteswap() + faces_swapped = faces_swapped.view(faces_swapped.dtype.newbyteorder()) assert np.array_equal(coords_swapped, coords) assert np.array_equal(faces_swapped, faces) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index c1b0124ebb..a23bdb5a68 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -2443,9 +2443,13 @@ def _get_analyze_compat_dtype(arr): return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32') mn, mx = arr.min(), arr.max() - if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32): + if (isinstance(mn, int) and isinstance(mx, int)) or ( + np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32) + 
): return np.dtype('int32') - if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32): + if (isinstance(mn, float) and isinstance(mx, float)) or ( + np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32) + ): return np.dtype('float32') raise ValueError( From 97e3aa95093ade487c93398413e2194828dee1ae Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Thu, 26 Oct 2023 13:36:11 +0200 Subject: [PATCH 439/702] more fix --- nibabel/freesurfer/tests/test_mghformat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 5a400119ba..189f1a9dd7 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -345,7 +345,7 @@ def test_mghheader_default_structarr(): for endianness in (None,) + BIG_CODES: hdr2 = MGHHeader.default_structarr(endianness=endianness) assert hdr2 == hdr - assert hdr2.newbyteorder('>') == hdr + assert hdr2.view(hdr2.dtype.newbyteorder('>')) == hdr for endianness in LITTLE_CODES: with pytest.raises(ValueError): From a765af030f86036d7a78fa5c1b93615f7119f4b6 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Thu, 26 Oct 2023 13:40:33 +0200 Subject: [PATCH 440/702] fix more stuff --- nibabel/casting.py | 8 ++++---- nibabel/nifti1.py | 2 +- nibabel/tests/test_arraywriters.py | 3 ++- nibabel/tests/test_nifti1.py | 2 +- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 1ed36ad440..86fbc35103 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -291,7 +291,7 @@ def type_info(np_type): return ret info_64 = np.finfo(np.float64) if dt.kind == 'c': - assert np_type is np.longcomplex + assert np_type is np.clongdouble vals = (nmant, nexp, width / 2) else: assert np_type is np.longdouble @@ -319,7 +319,7 @@ def type_info(np_type): # Oh dear, we don't recognize the type information. Try some known types # and then give up. At this stage we're expecting exotic longdouble or # their complex equivalent. - if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32): + if np_type not in (np.longdouble, np.clongdouble) or width not in (16, 32): raise FloatingError(f'We had not expected type {np_type}') if vals == (1, 1, 16) and on_powerpc() and _check_maxexp(np.longdouble, 1024): # double pair on PPC. The _check_nmant routine does not work for this @@ -329,13 +329,13 @@ def type_info(np_type): # Got float64 despite everything pass elif _check_nmant(np.longdouble, 112) and _check_maxexp(np.longdouble, 16384): - # binary 128, but with some busted type information. np.longcomplex + # binary 128, but with some busted type information. 
np.clongdouble # seems to break here too, so we need to use np.longdouble and # complexify two = np.longdouble(2) # See: https://matthew-brett.github.io/pydagogue/floating_point.html max_val = (two**113 - 1) / (two**112) * two**16383 - if np_type is np.longcomplex: + if np_type is np.clongdouble: max_val += 0j ret = dict( min=-max_val, diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index a23bdb5a68..64e70b7913 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -89,7 +89,7 @@ if have_binary128(): # Only enable 128 bit floats if we really have IEEE binary 128 longdoubles _float128t: type[np.generic] = np.longdouble - _complex256t: type[np.generic] = np.longcomplex + _complex256t: type[np.generic] = np.clongdouble else: _float128t = np.void _complex256t = np.void diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index b0cace66a2..89e7ac6755 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -61,7 +61,8 @@ def test_arraywriters(): assert aw.out_dtype == arr.dtype assert_array_equal(arr, round_trip(aw)) # Byteswapped should be OK - bs_arr = arr.byteswap().newbyteorder('S') + bs_arr = arr.byteswap() + bs_arr = bs_arr.view(bs_arr.dtype.newbyteorder('S')) bs_aw = klass(bs_arr) bs_aw_rt = round_trip(bs_aw) # assert against original array because POWER7 was running into diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index fbefe99e56..c7c4d1d84b 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -79,7 +79,7 @@ class TestNifti1PairHeader(tana.TestAnalyzeHeader, tspm.HeaderScalingMixin): (np.int8, np.uint16, np.uint32, np.int64, np.uint64, np.complex128) ) if have_binary128(): - supported_np_types = supported_np_types.union((np.longdouble, np.longcomplex)) + supported_np_types = supported_np_types.union((np.longdouble, np.clongdouble)) tana.add_duplicate_types(supported_np_types) def test_empty(self): From df96ae390c685dadc748f2dae59dd5e23c77df4c Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 10:34:49 -0400 Subject: [PATCH 441/702] FIX: check --- nibabel/nifti1.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 64e70b7913..ab310f1ba0 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -2443,14 +2443,18 @@ def _get_analyze_compat_dtype(arr): return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32') mn, mx = arr.min(), arr.max() - if (isinstance(mn, int) and isinstance(mx, int)) or ( - np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32) - ): + if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32): return np.dtype('int32') - if (isinstance(mn, float) and isinstance(mx, float)) or ( - np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32) - ): + elif (isinstance(mn, int) and isinstance(mx, int)): + info = np.finfo('int32') + if mn >= info.min and mx <= info.max: + return np.dtype('int32') + if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32): return np.dtype('float32') + elif (isinstance(mn, float) and isinstance(mx, float): + info = np.finfo('float32') + if mn >= info.min and mx <= info.max: + return np.dtype('float32') raise ValueError( f'Cannot find analyze-compatible dtype for array with dtype={dtype} (min={mn}, max={mx})' From 83111ea5cd63c5e9856af49cf5c6fd7cbe1bf3d7 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 10:35:00 -0400 Subject: [PATCH 442/702] FIX: check --- nibabel/nifti1.py | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index ab310f1ba0..c3cccd4849 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -2445,13 +2445,13 @@ def _get_analyze_compat_dtype(arr): mn, mx = arr.min(), arr.max() if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32): return np.dtype('int32') - elif (isinstance(mn, int) and isinstance(mx, int)): + elif isinstance(mn, int) and isinstance(mx, int): info = np.finfo('int32') if mn >= info.min and mx <= info.max: return np.dtype('int32') if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32): return np.dtype('float32') - elif (isinstance(mn, float) and isinstance(mx, float): + elif isinstance(mn, float) and isinstance(mx, float): info = np.finfo('float32') if mn >= info.min and mx <= info.max: return np.dtype('float32') From 6ffea1bf3d1f2130437251fcff38255c51baf048 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 10:38:33 -0400 Subject: [PATCH 443/702] FIX: Python types --- nibabel/nifti1.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index c3cccd4849..9af4fa41ef 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -2443,18 +2443,18 @@ def _get_analyze_compat_dtype(arr): return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32') mn, mx = arr.min(), arr.max() - if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32): - return np.dtype('int32') - elif isinstance(mn, int) and isinstance(mx, int): + if isinstance(mn, int) and isinstance(mx, int): info = np.finfo('int32') if mn >= info.min and mx <= info.max: return np.dtype('int32') - if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32): - return np.dtype('float32') elif isinstance(mn, float) and isinstance(mx, float): info = np.finfo('float32') if mn >= info.min and mx <= info.max: return np.dtype('float32') + elif np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32): + return np.dtype('int32') + elif np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32): + return np.dtype('float32') raise ValueError( f'Cannot find analyze-compatible dtype for array with dtype={dtype} (min={mn}, max={mx})' From 86b05976797fd9e86ccbbbd17af135c8eb36da5b Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 10:39:07 -0400 Subject: [PATCH 444/702] FIX: Preserve --- nibabel/nifti1.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 9af4fa41ef..7b9bd85876 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -2443,18 +2443,18 @@ def _get_analyze_compat_dtype(arr): return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32') mn, mx = arr.min(), arr.max() + if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32): + return np.dtype('int32') + if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32): + return np.dtype('float32') if isinstance(mn, int) and isinstance(mx, int): info = np.finfo('int32') if mn >= info.min and mx <= info.max: return np.dtype('int32') - elif isinstance(mn, float) and isinstance(mx, float): + if isinstance(mn, float) and isinstance(mx, float): info = np.finfo('float32') if mn >= info.min and mx <= info.max: return np.dtype('float32') - elif np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32): - return np.dtype('int32') - elif np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32): - return np.dtype('float32') raise ValueError( f'Cannot find analyze-compatible dtype for array 
with dtype={dtype} (min={mn}, max={mx})' From fbbd801d987af086893372d010a106bbdbcc89fb Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 10:40:20 -0400 Subject: [PATCH 445/702] FIX: Simplify --- nibabel/quaternions.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index d2fc3ac4ca..e24b33bcc2 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -29,9 +29,7 @@ import numpy as np -from .casting import sctypes - -MAX_FLOAT = sctypes['float'][-1] +MAX_FLOAT = np.finfo(float).max FLOAT_EPS = np.finfo(float).eps From 4630e0d799a5310fb9187753f2cccdab0d7e65be Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 13:30:00 -0400 Subject: [PATCH 446/702] FIX: Maybe --- nibabel/casting.py | 1 + nibabel/nifti1.py | 10 +++------- nibabel/testing/__init__.py | 2 +- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 86fbc35103..b96393d0c2 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -495,6 +495,7 @@ def int_to_float(val, flt_type): # -1 if not isinstance(val, Integral): val = int(str(val)) + val = int(val) faval = np.longdouble(0) while val != 0: f64 = np.float64(val) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 7b9bd85876..7bbb6bf75a 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -2443,15 +2443,11 @@ def _get_analyze_compat_dtype(arr): return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32') mn, mx = arr.min(), arr.max() - if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32): - return np.dtype('int32') - if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32): - return np.dtype('float32') - if isinstance(mn, int) and isinstance(mx, int): - info = np.finfo('int32') + if arr.dtype.kind == 'i': + info = np.iinfo('int32') if mn >= info.min and mx <= info.max: return np.dtype('int32') - if isinstance(mn, float) and isinstance(mx, float): + elif arr.dtype.kind == 'f': info = np.finfo('float32') if mn >= info.min and mx <= info.max: return np.dtype('float32') diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 5baa5e2b86..6674c08f41 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -82,7 +82,7 @@ def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): a = a.astype(float) if b.dtype.kind in 'ui': b = b.astype(float) - assert np.allclose(a, b, rtol=rtol, atol=atol) + np.testing.assert_allclose(a, b, rtol=rtol, atol=atol) def assert_arrays_equal(arrays1, arrays2): From 49b1d41dd0c8f224847b9e9b6b787b7e80b8e5ee Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 13:39:24 -0400 Subject: [PATCH 447/702] FIX: Better --- nibabel/nifti1.py | 2 +- nibabel/tests/test_spm99analyze.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 7bbb6bf75a..890bc2e228 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -2443,7 +2443,7 @@ def _get_analyze_compat_dtype(arr): return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32') mn, mx = arr.min(), arr.max() - if arr.dtype.kind == 'i': + if arr.dtype.kind in 'iu': info = np.iinfo('int32') if mn >= info.min and mx <= info.max: return np.dtype('int32') diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index f65855ce4b..ada92d3b05 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -328,7 +328,8 @@ def 
test_no_scaling(self, in_dtype, supported_dtype): inter = 10 if hdr.has_data_intercept else 0 mn_in, mx_in = _dt_min_max(in_dtype) - arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype) + mn = -1 if np.dtype(in_dtype).kind != 'u' else 0 + arr = np.array([mn_in, mn, 0, 1, 10, mx_in], dtype=in_dtype) img = img_class(arr, np.eye(4), hdr) img.set_data_dtype(supported_dtype) # Setting the scaling means we don't calculate it later From 5eb5e54081f591019e97e7b1b70a326dc2c51d74 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 13:45:00 -0400 Subject: [PATCH 448/702] FIX: Revert --- nibabel/testing/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 6674c08f41..5baa5e2b86 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -82,7 +82,7 @@ def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): a = a.astype(float) if b.dtype.kind in 'ui': b = b.astype(float) - np.testing.assert_allclose(a, b, rtol=rtol, atol=atol) + assert np.allclose(a, b, rtol=rtol, atol=atol) def assert_arrays_equal(arrays1, arrays2): From b94b7f93e4933fe659f3179fddd5b97e81c95a09 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 26 Oct 2023 20:26:08 +0200 Subject: [PATCH 449/702] MNT: Fix typo found by codespell --- nibabel/tests/test_parrec.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index c411d69003..6035d47f8d 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -607,7 +607,7 @@ def test_truncations(): PARRECHeader(gen_info, slice_info) gen_info['max_echoes'] = 1 hdr = PARRECHeader(gen_info, slice_info) - # dyamics + # dynamics gen_info['max_dynamics'] = 3 with pytest.raises(PARRECError): PARRECHeader(gen_info, slice_info) From a1ddae887f0d5cc9be639fbd30dea56b4cbf742e Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 14:38:08 -0400 Subject: [PATCH 450/702] FIX: ComplexWarning --- nibabel/tests/test_proxy_api.py | 7 ++++++- nibabel/tests/test_volumeutils.py | 9 +++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 3b4412ceee..3a713d16de 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -57,6 +57,11 @@ h5py, have_h5py, _ = optional_package('h5py') +try: + from numpy.exceptions import ComplexWarning +except ImportError: # NumPy < 1.25 + from numpy import ComplexWarning + def _some_slicers(shape): ndim = len(shape) @@ -143,7 +148,7 @@ def validate_array_interface_with_dtype(self, pmaker, params): if np.issubdtype(orig.dtype, np.complexfloating): context = clear_and_catch_warnings() context.__enter__() - warnings.simplefilter('ignore', np.ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) for dtype in sctypes['float'] + sctypes['int'] + sctypes['uint']: # Directly coerce with a dtype diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 59a5f1989f..7db67ce2cd 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -69,6 +69,11 @@ FP_RUNTIME_WARN = Version(np.__version__) >= Version('1.24.0.dev0+239') +try: + from numpy.exceptions import ComplexWarning +except ModuleNotFoundError: # NumPy < 1.25 + from numpy import ComplexWarning + def test__is_compressed_fobj(): # _is_compressed 
helper function @@ -610,7 +615,7 @@ def test_a2f_bad_scaling(): if np.issubdtype(in_type, np.complexfloating) and not np.issubdtype( out_type, np.complexfloating ): - cm = pytest.warns(np.ComplexWarning) + cm = pytest.warns(ComplexWarning) if (slope, inter) == (1, 0): with cm: assert_array_equal( @@ -650,7 +655,7 @@ def test_a2f_nan2zero_range(): arr = np.array([-1, 0, 1, np.nan], dtype=dt) # Error occurs for arrays without nans too arr_no_nan = np.array([-1, 0, 1, 2], dtype=dt) - complex_warn = (np.ComplexWarning,) if np.issubdtype(dt, np.complexfloating) else () + complex_warn = (ComplexWarning,) if np.issubdtype(dt, np.complexfloating) else () # Casting nan to int will produce a RuntimeWarning in numpy 1.24 nan_warn = (RuntimeWarning,) if FP_RUNTIME_WARN else () c_and_n_warn = complex_warn + nan_warn From cd362aa596cca259a68a71272932419476f6d5af Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 14:51:04 -0400 Subject: [PATCH 451/702] FIX: Context --- nibabel/tests/test_floating.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 73e2ed5cc4..5169ce0d96 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -1,9 +1,11 @@ """Test floating point deconstructions and floor methods """ import sys +from contextlib import nullcontext import numpy as np import pytest +from packaging.version import Version from ..casting import ( FloatingError, @@ -27,6 +29,8 @@ LD_INFO = type_info(np.longdouble) +FP_OVERFLOW_WARN = Version(np.__version__) <= Version('2.0.0.dev0') + def dtt2dict(dtt): """Create info dictionary from numpy type""" @@ -149,9 +153,14 @@ def test_as_int(): nexp64 = floor_log2(type_info(np.float64)['max']) with np.errstate(over='ignore'): val = np.longdouble(2**nexp64) * 2 # outside float64 range - with pytest.raises(OverflowError): + assert val > np.finfo('float64').max + if FP_OVERFLOW_WARN: + ctx = pytest.raises(OverflowError) + else: + ctx = nullcontext() + with ctx: as_int(val) - with pytest.raises(OverflowError): + with ctx: as_int(-val) From c32b0d22608f807e110db8efcf60b875e4f24376 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 15:20:21 -0400 Subject: [PATCH 452/702] FIX: One more --- nibabel/tests/test_floating.py | 1 + nibabel/tests/test_volumeutils.py | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 5169ce0d96..d4342d568f 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -154,6 +154,7 @@ def test_as_int(): with np.errstate(over='ignore'): val = np.longdouble(2**nexp64) * 2 # outside float64 range assert val > np.finfo('float64').max + # TODO: Should this actually still overflow? Does it matter? 
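Context for the guard being introduced here: the ``as_int`` helper under test
accumulates float64 chunks, so a longdouble above the float64 range has
historically raised ``OverflowError``, while plain ``int()`` reads the
extended-precision value directly. A sketch, assuming longdouble is wider than
float64 (not the case on Windows, where it aliases double)::

    import numpy as np

    with np.errstate(over='ignore'):
        val = np.longdouble(2) ** 1025   # above the float64 range
    assert val > np.finfo(np.float64).max
    assert int(val) == 2 ** 1025         # exact, no float64 round trip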
if FP_OVERFLOW_WARN: ctx = pytest.raises(OverflowError) else: diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 7db67ce2cd..7da9925814 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -544,7 +544,8 @@ def test_a2f_scaled_unscaled(): ): mn_in, mx_in = _dt_min_max(in_dtype) nan_val = np.nan if in_dtype in CFLOAT_TYPES else 10 - arr = np.array([mn_in, -1, 0, 1, mx_in, nan_val], dtype=in_dtype) + mn = 0 if np.dtype(in_dtype).kind == "u" else 1 + arr = np.array([mn_in, mn, 0, 1, mx_in, nan_val], dtype=in_dtype) mn_out, mx_out = _dt_min_max(out_dtype) # 0 when scaled to output will also be the output value for NaN nan_fill = -intercept / divslope @@ -738,9 +739,10 @@ def test_apply_scaling(): f32_arr = np.zeros((1,), dtype=f32) i16_arr = np.zeros((1,), dtype=np.int16) # Check float upcast (not the normal numpy scalar rule) - # This is the normal rule - no upcast from scalar - assert (f32_arr * f64(1)).dtype == np.float32 - assert (f32_arr + f64(1)).dtype == np.float32 + # This is the normal rule - no upcast from Python scalar + # (on NumPy 2.0 it *will* upcast from a np.float64 scalar!) + assert (f32_arr * 1.).dtype == np.float32 + assert (f32_arr + 1.).dtype == np.float32 # The function does upcast though ret = apply_read_scaling(np.float32(0), np.float64(2)) assert ret.dtype == np.float64 From d4596b7fadb7002d951b3e5884f8d9c44742a6ea Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 26 Oct 2023 15:23:25 -0400 Subject: [PATCH 453/702] FIX: Explicit --- nibabel/tests/test_proxy_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 3a713d16de..421bc5bf47 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -59,7 +59,7 @@ try: from numpy.exceptions import ComplexWarning -except ImportError: # NumPy < 1.25 +except ModuleNotFoundError: # NumPy < 1.25 from numpy import ComplexWarning From 0f746c035cd12a153763ecd6f6e8fa6c29df397d Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 1 Nov 2023 14:49:58 -0400 Subject: [PATCH 454/702] Apply suggestions from code review Co-authored-by: Chris Markiewicz --- nibabel/casting.py | 7 +------ nibabel/conftest.py | 9 +++++---- nibabel/quaternions.py | 4 +++- nibabel/tests/test_volumeutils.py | 9 ++++++--- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index b96393d0c2..a26c359d3f 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -45,7 +45,6 @@ class CastingError(Exception): ], 'others': [bool, object, bytes, str, np.void], } -# fmt: off sctypes_aliases = { getattr(np, dtype) for dtype in ( @@ -59,8 +58,7 @@ class CastingError(Exception): 'object_', 'void', ) if hasattr(np, dtype) -} -# fmt: on +} # fmt:skip def float_to_int(arr, int_type, nan2zero=True, infmax=False): @@ -492,9 +490,6 @@ def int_to_float(val, flt_type): return flt_type(val) # The following works around a nasty numpy 1.4.1 bug such that: # >>> int(np.uint32(2**32-1) - # -1 - if not isinstance(val, Integral): - val = int(str(val)) val = int(val) faval = np.longdouble(0) while val != 0: diff --git a/nibabel/conftest.py b/nibabel/conftest.py index a483b4b6e6..f2a3f7c06e 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -6,7 +6,8 @@ import nibabel.nicom -def pytest_configure(config): - """Configure pytest options.""" - if int(np.__version__[0]) >= 2: - np.set_printoptions(legacy=125) +@pytest.fixture(scope='session', 
autouse=True) +def legacy_printoptions(): + from packaging.version import Version + if Version(np.__version__) >= Version('1.22'): + np.set_printoptions(legacy='1.21') diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index e24b33bcc2..d2fc3ac4ca 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -29,7 +29,9 @@ import numpy as np -MAX_FLOAT = np.finfo(float).max +from .casting import sctypes + +MAX_FLOAT = sctypes['float'][-1] FLOAT_EPS = np.finfo(float).eps diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 7da9925814..f19f27f717 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -543,9 +543,12 @@ def test_a2f_scaled_unscaled(): NUMERIC_TYPES, NUMERIC_TYPES, (0, 0.5, -1, 1), (1, 0.5, 2) ): mn_in, mx_in = _dt_min_max(in_dtype) - nan_val = np.nan if in_dtype in CFLOAT_TYPES else 10 - mn = 0 if np.dtype(in_dtype).kind == "u" else 1 - arr = np.array([mn_in, mn, 0, 1, mx_in, nan_val], dtype=in_dtype) + vals = [mn_in, 0, 1, mx_in] + if np.dtype(in_dtype).kind != 'u': + vals.append(-1) + if in_dtype in CFLOAT_TYPES: + vals.append(np.nan) + arr = np.array(vals, dtype=in_dtype) mn_out, mx_out = _dt_min_max(out_dtype) # 0 when scaled to output will also be the output value for NaN nan_fill = -intercept / divslope From e3c72e1aa3de7f0ca443583d350f2eb2f03ffbb9 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 1 Nov 2023 14:53:46 -0400 Subject: [PATCH 455/702] FIX: Style --- nibabel/conftest.py | 1 + nibabel/tests/test_spatialimages.py | 6 +----- nibabel/tests/test_volumeutils.py | 4 ++-- nibabel/tests/test_wrapstruct.py | 2 +- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/nibabel/conftest.py b/nibabel/conftest.py index f2a3f7c06e..cf01392324 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -9,5 +9,6 @@ @pytest.fixture(scope='session', autouse=True) def legacy_printoptions(): from packaging.version import Version + if Version(np.__version__) >= Version('1.22'): np.set_printoptions(legacy='1.21') diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index aacff74b7b..5cad23a22f 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -18,11 +18,7 @@ from .. import load as top_load from ..imageclasses import spatial_axes_first from ..spatialimages import HeaderDataError, SpatialHeader, SpatialImage -from ..testing import ( - bytesio_round_trip, - expires, - memmap_after_ufunc, -) +from ..testing import bytesio_round_trip, expires, memmap_after_ufunc from ..tmpdirs import InTemporaryDirectory diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index f19f27f717..6a1fae9047 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -744,8 +744,8 @@ def test_apply_scaling(): # Check float upcast (not the normal numpy scalar rule) # This is the normal rule - no upcast from Python scalar # (on NumPy 2.0 it *will* upcast from a np.float64 scalar!) 
- assert (f32_arr * 1.).dtype == np.float32 - assert (f32_arr + 1.).dtype == np.float32 + assert (f32_arr * 1.0).dtype == np.float32 + assert (f32_arr + 1.0).dtype == np.float32 # The function does upcast though ret = apply_read_scaling(np.float32(0), np.float64(2)) assert ret.dtype == np.float64 diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 3d08f01149..10b4b3f22c 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -31,8 +31,8 @@ from numpy.testing import assert_array_equal from .. import imageglobals -from ..casting import sctypes from ..batteryrunners import Report +from ..casting import sctypes from ..spatialimages import HeaderDataError from ..volumeutils import Recoder, native_code, swapped_code from ..wrapstruct import LabeledWrapStruct, WrapStruct, WrapStructError From e3a7495af17c0c0fd8b5eac898b04bc6b40af3ad Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 1 Nov 2023 14:58:50 -0400 Subject: [PATCH 456/702] STY: Flake --- nibabel/casting.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index a26c359d3f..743ce47068 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -6,7 +6,6 @@ from __future__ import annotations import warnings -from numbers import Integral from platform import machine, processor import numpy as np From 1bc593c6db9773a7e79637c4a282564b6bf41a3f Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 1 Nov 2023 16:46:26 -0400 Subject: [PATCH 457/702] FIX: Test val equiv --- nibabel/tests/test_floating.py | 9 +++++++-- nibabel/tests/test_volumeutils.py | 7 ++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index d4342d568f..c1853e1f66 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -159,10 +159,15 @@ def test_as_int(): ctx = pytest.raises(OverflowError) else: ctx = nullcontext() + out_val = None with ctx: - as_int(val) + out_val = as_int(val) + if out_val is not None: + assert out_val == val with ctx: - as_int(-val) + out_val = as_int(-val) + if out_val is not None: + assert out_val == -val def test_int_to_float(): diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 6a1fae9047..07ca9a6baa 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -68,6 +68,7 @@ NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES FP_RUNTIME_WARN = Version(np.__version__) >= Version('1.24.0.dev0+239') +NP_2 = Version(np.__version__) >= Version('2.0.0.dev0') try: from numpy.exceptions import ComplexWarning @@ -743,9 +744,13 @@ def test_apply_scaling(): i16_arr = np.zeros((1,), dtype=np.int16) # Check float upcast (not the normal numpy scalar rule) # This is the normal rule - no upcast from Python scalar - # (on NumPy 2.0 it *will* upcast from a np.float64 scalar!) 
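The comment being removed above points at the core NumPy 2.0 behavior change
tracked throughout this series: under NEP 50, promotion depends on the kind of
scalar rather than its value. A sketch of the two cases the surrounding
assertions distinguish::

    import numpy as np

    f32_arr = np.zeros(1, dtype=np.float32)
    # A Python float never upcasts the array
    assert (f32_arr + 1.0).dtype == np.float32
    # A np.float64 scalar was value-cast on NumPy 1.x (result stays float32)
    # but promotes the result to float64 under NEP 50 on NumPy 2.0
    version_dependent = (f32_arr + np.float64(1)).dtype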
assert (f32_arr * 1.0).dtype == np.float32 assert (f32_arr + 1.0).dtype == np.float32 + # This is the normal rule - no upcast from scalar + # before NumPy 2.0, after 2.0, it upcasts + want_dtype = np.float64 if NP_2 else np.float32 + assert (f32_arr * f64(1)).dtype == want_dtype + assert (f32_arr + f64(1)).dtype == want_dtype # The function does upcast though ret = apply_read_scaling(np.float32(0), np.float64(2)) assert ret.dtype == np.float64 From a71eebf0a72a951535fab6d1a8b8b7945c25585e Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 1 Nov 2023 16:48:21 -0400 Subject: [PATCH 458/702] FIX: Version --- nibabel/tests/test_floating.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index c1853e1f66..82fdc4402a 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -29,7 +29,7 @@ LD_INFO = type_info(np.longdouble) -FP_OVERFLOW_WARN = Version(np.__version__) <= Version('2.0.0.dev0') +FP_OVERFLOW_WARN = Version(np.__version__) < Version('2.0.0.dev0') def dtt2dict(dtt): From 443ec37e2451f37e9f24e766921b5b259f8c40f9 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 2 Nov 2023 10:30:45 -0400 Subject: [PATCH 459/702] RF: Remove old as_int() hack --- nibabel/arraywriters.py | 30 +++++-------- nibabel/casting.py | 30 +++---------- nibabel/tests/test_analyze.py | 5 +-- nibabel/tests/test_casting.py | 8 +--- nibabel/tests/test_floating.py | 62 +++++---------------------- nibabel/tests/test_removalschedule.py | 1 + 6 files changed, 30 insertions(+), 106 deletions(-) diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index bdd2d548f8..c813ccdbfa 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -30,15 +30,7 @@ def __init__(self, array, out_dtype=None) """ import numpy as np -from .casting import ( - as_int, - best_float, - floor_exact, - int_abs, - int_to_float, - shared_range, - type_info, -) +from .casting import best_float, floor_exact, int_abs, int_to_float, shared_range, type_info from .volumeutils import array_to_file, finite_range @@ -152,9 +144,8 @@ def scaling_needed(self): # No scaling needed if data already fits in output type # But note - we need to convert to ints, to avoid conversion to float # during comparisons, and therefore int -> float conversions which are - # not exact. Only a problem for uint64 though. We need as_int here to - # work around a numpy 1.4.1 bug in uint conversion - if as_int(mn) >= as_int(info.min) and as_int(mx) <= as_int(info.max): + # not exact. Only a problem for uint64 though. + if int(mn) >= int(info.min) and int(mx) <= int(info.max): return False return True @@ -392,7 +383,7 @@ def _do_scaling(self): out_max, out_min = info.max, info.min # If left as int64, uint64, comparisons will default to floats, and # these are inexact for > 2**53 - so convert to int - if as_int(mx) <= as_int(out_max) and as_int(mn) >= as_int(out_min): + if int(mx) <= int(out_max) and int(mn) >= int(out_min): # already in range return # (u)int to (u)int scaling @@ -410,7 +401,7 @@ def _iu2iu(self): # that deals with max neg ints. abs problem only arises when all # the data is set to max neg integer value o_min, o_max = shared_range(self.scaler_dtype, out_dt) - if mx <= 0 and int_abs(mn) <= as_int(o_max): # sign flip enough? + if mx <= 0 and int_abs(mn) <= int(o_max): # sign flip enough? 
# -1.0 * arr will be in scaler_dtype precision self.slope = -1.0 return @@ -546,14 +537,13 @@ def to_fileobj(self, fileobj, order='F'): def _iu2iu(self): # (u)int to (u)int - mn, mx = (as_int(v) for v in self.finite_range()) + mn, mx = (int(v) for v in self.finite_range()) # range may be greater than the largest integer for this type. - # as_int needed to work round numpy 1.4.1 int casting bug out_dtype = self._out_dtype # Options in this method are scaling using intercept only. These will # have to pass through ``self.scaler_dtype`` (because the intercept is # in this type). - o_min, o_max = (as_int(v) for v in shared_range(self.scaler_dtype, out_dtype)) + o_min, o_max = (int(v) for v in shared_range(self.scaler_dtype, out_dtype)) type_range = o_max - o_min mn2mx = mx - mn if mn2mx <= type_range: # might offset be enough? @@ -565,12 +555,12 @@ def _iu2iu(self): else: # int output - take midpoint to 0 # ceil below increases inter, pushing scale up to 0.5 towards # -inf, because ints have abs min == abs max + 1 - midpoint = mn + as_int(np.ceil(mn2mx / 2.0)) + midpoint = mn + int(np.ceil(mn2mx / 2.0)) # Floor exact decreases inter, so pulling scaled values more # positive. This may make mx - inter > t_max inter = floor_exact(midpoint, self.scaler_dtype) # Need to check still in range after floor_exact-ing - int_inter = as_int(inter) + int_inter = int(inter) assert mn - int_inter >= o_min if mx - int_inter <= o_max: self.inter = inter @@ -598,7 +588,7 @@ def _range_scale(self, in_min, in_max): # same as double so in_range will be 2**64 - thus overestimating # slope slightly. Casting to int needed to allow in_max-in_min to # be larger than the largest (u)int value - in_min, in_max = as_int(in_min), as_int(in_max) + in_min, in_max = int(in_min), int(in_max) in_range = int_to_float(in_max - in_min, big_float) # Cast to float for later processing. in_min, in_max = (int_to_float(v, big_float) for v in (in_min, in_max)) diff --git a/nibabel/casting.py b/nibabel/casting.py index 743ce47068..15a8b93496 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -10,6 +10,8 @@ import numpy as np +from .deprecated import deprecate_with_version + class CastingError(Exception): pass @@ -402,6 +404,7 @@ def _check_maxexp(np_type, maxexp): return np.isfinite(two ** (maxexp - 1)) and not np.isfinite(two**maxexp) +@deprecate_with_version('as_int() is deprecated. Use int() instead.', '5.2.0', '7.0.0') def as_int(x, check=True): """Return python integer representation of number @@ -411,9 +414,6 @@ def as_int(x, check=True): It is also useful to work around a numpy 1.4.1 bug in conversion of uints to python ints. - This routine will still raise an OverflowError for values that are outside - the range of float64. - Parameters ---------- x : object @@ -439,28 +439,10 @@ def as_int(x, check=True): >>> as_int(2.1, check=False) 2 """ - x = np.array(x) - if x.dtype.kind in 'iu': - # This works around a nasty numpy 1.4.1 bug such that: - # >>> int(np.uint32(2**32-1) - # -1 - return int(str(x)) ix = int(x) - if ix == x: - return ix - fx = np.floor(x) - if check and fx != x: + if check and ix != x: raise FloatingError(f'Not an integer: {x}') - if not fx.dtype.type == np.longdouble: - return int(x) - # Subtract float64 chunks until we have all of the number. 
If the int is - # too large, it will overflow - ret = 0 - while fx != 0: - f64 = np.float64(fx) - fx -= f64 - ret += int(f64) - return ret + return ix def int_to_float(val, flt_type): @@ -549,7 +531,7 @@ def floor_exact(val, flt_type): if not np.isfinite(fval): return fval info = type_info(flt_type) - diff = val - as_int(fval) + diff = val - int(fval) if diff >= 0: # floating point value <= val return fval # Float casting made the value go up diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 4e024d6e3b..cb7b8d686d 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -26,7 +26,7 @@ from .. import imageglobals from ..analyze import AnalyzeHeader, AnalyzeImage from ..arraywriters import WriterError -from ..casting import as_int, sctypes_aliases +from ..casting import sctypes_aliases from ..nifti1 import Nifti1Header from ..optpkg import optional_package from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types @@ -308,8 +308,7 @@ def test_shapes(self): assert hdr.get_data_shape() == shape # Check max works, but max+1 raises error dim_dtype = hdr.structarr['dim'].dtype - # as_int for safety to deal with numpy 1.4.1 int conversion errors - mx = as_int(np.iinfo(dim_dtype).max) + mx = int(np.iinfo(dim_dtype).max) shape = (mx,) hdr.set_data_shape(shape) assert hdr.get_data_shape() == shape diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index d04b996bb6..d458254010 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -10,7 +10,6 @@ from ..casting import ( CastingError, able_int_type, - as_int, best_float, float_to_int, floor_log2, @@ -101,11 +100,6 @@ def test_casting(): mn, mx = shared_range(ft, it) with np.errstate(invalid='ignore'): iarr = float_to_int(farr, it) - # Dammit - for long doubles we need to jump through some hoops not - # to round to numbers outside the range - if ft is np.longdouble: - mn = as_int(mn) - mx = as_int(mx) exp_arr = np.array([mn, mx, mn, mx, 0, 0, 11], dtype=it) assert_array_equal(iarr, exp_arr) # Now test infmax version @@ -149,7 +143,7 @@ def test_int_abs(): assert udtype.kind == 'u' assert idtype.itemsize == udtype.itemsize mn, mx = in_arr - e_mn = as_int(mx) + 1 # as_int needed for numpy 1.4.1 casting + e_mn = int(mx) + 1 assert int_abs(mx) == mx assert int_abs(mn) == e_mn assert_array_equal(int_abs(in_arr), [e_mn, mx]) diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 82fdc4402a..f9c49ceb10 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -11,7 +11,6 @@ FloatingError, _check_maxexp, _check_nmant, - as_int, ceil_exact, floor_exact, floor_log2, @@ -128,48 +127,6 @@ def test_check_nmant_nexp(): assert _check_maxexp(t, ti['maxexp']) -def test_as_int(): - # Integer representation of number - assert as_int(2.0) == 2 - assert as_int(-2.0) == -2 - with pytest.raises(FloatingError): - as_int(2.1) - with pytest.raises(FloatingError): - as_int(-2.1) - assert as_int(2.1, False) == 2 - assert as_int(-2.1, False) == -2 - v = np.longdouble(2**64) - assert as_int(v) == 2**64 - # Have all long doubles got 63+1 binary bits of precision? 
Windows 32-bit - # longdouble appears to have 52 bit precision, but we avoid that by checking - # for known precisions that are less than that required - try: - nmant = type_info(np.longdouble)['nmant'] - except FloatingError: - nmant = 63 # Unknown precision, let's hope it's at least 63 - v = np.longdouble(2) ** (nmant + 1) - 1 - assert as_int(v) == 2 ** (nmant + 1) - 1 - # Check for predictable overflow - nexp64 = floor_log2(type_info(np.float64)['max']) - with np.errstate(over='ignore'): - val = np.longdouble(2**nexp64) * 2 # outside float64 range - assert val > np.finfo('float64').max - # TODO: Should this actually still overflow? Does it matter? - if FP_OVERFLOW_WARN: - ctx = pytest.raises(OverflowError) - else: - ctx = nullcontext() - out_val = None - with ctx: - out_val = as_int(val) - if out_val is not None: - assert out_val == val - with ctx: - out_val = as_int(-val) - if out_val is not None: - assert out_val == -val - - def test_int_to_float(): # Convert python integer to floating point # Standard float types just return cast value @@ -215,23 +172,24 @@ def test_int_to_float(): return # test we recover precision just above nmant i = 2 ** (nmant + 1) - 1 - assert as_int(int_to_float(i, LD)) == i - assert as_int(int_to_float(-i, LD)) == -i + assert int(int_to_float(i, LD)) == i + assert int(int_to_float(-i, LD)) == -i # If longdouble can cope with 2**64, test if nmant >= 63: # Check conversion to int; the line below causes an error subtracting # ints / uint64 values, at least for Python 3.3 and numpy dev 1.8 big_int = np.uint64(2**64 - 1) - assert as_int(int_to_float(big_int, LD)) == big_int + assert int(int_to_float(big_int, LD)) == big_int -def test_as_int_np_fix(): - # Test as_int works for integers. We need as_int for integers because of a +def test_int_np_regression(): + # Test int works as expected for integers. 
+ # We previously used a custom as_int() for integers because of a # numpy 1.4.1 bug such that int(np.uint32(2**32-1) == -1 for t in sctypes['int'] + sctypes['uint']: info = np.iinfo(t) mn, mx = np.array([info.min, info.max], dtype=t) - assert (mn, mx) == (as_int(mn), as_int(mx)) + assert (mn, mx) == (int(mn), int(mx)) def test_floor_exact_16(): @@ -264,8 +222,8 @@ def test_floor_exact(): to_test.append(np.longdouble) # When numbers go above int64 - I believe, numpy comparisons break down, # so we have to cast to int before comparison - int_flex = lambda x, t: as_int(floor_exact(x, t)) - int_ceex = lambda x, t: as_int(ceil_exact(x, t)) + int_flex = lambda x, t: int(floor_exact(x, t)) + int_ceex = lambda x, t: int(ceil_exact(x, t)) for t in to_test: # A number bigger than the range returns the max info = type_info(t) @@ -302,7 +260,7 @@ def test_floor_exact(): for i in range(5): iv = 2 ** (nmant + 1 + i) gap = 2 ** (i + 1) - assert as_int(t(iv) + t(gap)) == iv + gap + assert int(t(iv) + t(gap)) == iv + gap for j in range(1, gap): assert int_flex(iv + j, t) == iv assert int_flex(iv + gap + j, t) == iv + gap diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index db99ae3a46..eaf47774d1 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -17,6 +17,7 @@ ( '8.0.0', [ + ('nibabel.casting', 'as_int'), ('nibabel.tmpdirs', 'TemporaryDirectory'), ], ), From fce4911d0a8e824b5853705ecda3da14a19a38de Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 2 Nov 2023 10:48:19 -0400 Subject: [PATCH 460/702] RF: Remove old int_to_float() hack --- nibabel/arraywriters.py | 15 ++++---- nibabel/casting.py | 17 ++------- nibabel/tests/test_casting.py | 5 +-- nibabel/tests/test_floating.py | 53 ++++----------------------- nibabel/tests/test_removalschedule.py | 1 + 5 files changed, 21 insertions(+), 70 deletions(-) diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index c813ccdbfa..751eb6ad1f 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -30,7 +30,7 @@ def __init__(self, array, out_dtype=None) """ import numpy as np -from .casting import best_float, floor_exact, int_abs, int_to_float, shared_range, type_info +from .casting import best_float, floor_exact, int_abs, shared_range, type_info from .volumeutils import array_to_file, finite_range @@ -418,7 +418,7 @@ def _range_scale(self, in_min, in_max): # not lose precision because min/max are of fp type. out_min, out_max = np.array((out_min, out_max), dtype=big_float) else: # (u)int - out_min, out_max = (int_to_float(v, big_float) for v in (out_min, out_max)) + out_min, out_max = (big_float(v) for v in (out_min, out_max)) if self._out_dtype.kind == 'u': if in_min < 0 and in_max > 0: raise WriterError( @@ -584,14 +584,13 @@ def _range_scale(self, in_min, in_max): in_min, in_max = np.array([in_min, in_max], dtype=big_float) in_range = np.diff([in_min, in_max]) else: # max possible (u)int range is 2**64-1 (int64, uint64) - # int_to_float covers this range. On windows longdouble is the - # same as double so in_range will be 2**64 - thus overestimating - # slope slightly. Casting to int needed to allow in_max-in_min to - # be larger than the largest (u)int value + # On windows longdouble is the same as double so in_range will be 2**64 - + # thus overestimating slope slightly. 
Casting to int needed to allow + # in_max-in_min to be larger than the largest (u)int value in_min, in_max = int(in_min), int(in_max) - in_range = int_to_float(in_max - in_min, big_float) + in_range = big_float(in_max - in_min) # Cast to float for later processing. - in_min, in_max = (int_to_float(v, big_float) for v in (in_min, in_max)) + in_min, in_max = (big_float(v) for v in (in_min, in_max)) if out_dtype.kind == 'f': # Type range, these are also floats info = type_info(out_dtype) diff --git a/nibabel/casting.py b/nibabel/casting.py index 15a8b93496..101e0a0018 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -445,6 +445,7 @@ def as_int(x, check=True): return ix +@deprecate_with_version('int_to_float(..., dt) is deprecated. Use dt() instead.', '5.2.0', '7.0.0') def int_to_float(val, flt_type): """Convert integer `val` to floating point type `flt_type` @@ -467,17 +468,7 @@ def int_to_float(val, flt_type): f : numpy scalar of type `flt_type` """ - if flt_type is not np.longdouble: - return flt_type(val) - # The following works around a nasty numpy 1.4.1 bug such that: - # >>> int(np.uint32(2**32-1) - val = int(val) - faval = np.longdouble(0) - while val != 0: - f64 = np.float64(val) - faval += f64 - val -= int(f64) - return faval + return flt_type(val) def floor_exact(val, flt_type): @@ -524,8 +515,8 @@ def floor_exact(val, flt_type): val = int(val) flt_type = np.dtype(flt_type).type sign = 1 if val > 0 else -1 - try: # int_to_float deals with longdouble safely - fval = int_to_float(val, flt_type) + try: + fval = flt_type(val) except OverflowError: return sign * np.inf if not np.isfinite(fval): diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index d458254010..f345952aac 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -14,7 +14,6 @@ float_to_int, floor_log2, int_abs, - int_to_float, longdouble_precision_improved, sctypes, shared_range, @@ -41,7 +40,7 @@ def test_shared_range(): if casted_mx != imax: # The shared_range have told us that they believe the imax does # not have an exact representation. - fimax = int_to_float(imax, ft) + fimax = ft(imax) if np.isfinite(fimax): assert int(fimax) != imax # Therefore the imax, cast back to float, and to integer, will @@ -67,7 +66,7 @@ def test_shared_range(): if casted_mn != imin: # The shared_range have told us that they believe the imin does # not have an exact representation. 
- fimin = int_to_float(imin, ft) + fimin = ft(imin) if np.isfinite(fimin): assert int(fimin) != imin # Therefore the imin, cast back to float, and to integer, will diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index f9c49ceb10..2f1342932d 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -15,7 +15,6 @@ floor_exact, floor_log2, have_binary128, - int_to_float, longdouble_precision_improved, ok_floats, on_powerpc, @@ -127,59 +126,21 @@ def test_check_nmant_nexp(): assert _check_maxexp(t, ti['maxexp']) -def test_int_to_float(): - # Convert python integer to floating point - # Standard float types just return cast value - for ie3 in IEEE_floats: - nmant = type_info(ie3)['nmant'] - for p in range(nmant + 3): - i = 2**p + 1 - assert int_to_float(i, ie3) == ie3(i) - assert int_to_float(-i, ie3) == ie3(-i) - # IEEEs in this case are binary formats only - nexp = floor_log2(type_info(ie3)['max']) - # Values too large for the format - smn, smx = -(2 ** (nexp + 1)), 2 ** (nexp + 1) - if ie3 is np.float64: - with pytest.raises(OverflowError): - int_to_float(smn, ie3) - with pytest.raises(OverflowError): - int_to_float(smx, ie3) - else: - assert int_to_float(smn, ie3) == ie3(smn) - assert int_to_float(smx, ie3) == ie3(smx) - # Longdoubles do better than int, we hope - LD = np.longdouble - # up to integer precision of float64 nmant, we get the same result as for - # casting directly +def test_int_longdouble_np_regression(): + # Test longdouble conversion from int works as expected + # Previous versions of numpy would fail, and we used a custom int_to_float() + # function. This test remains to ensure we don't need to bring it back. nmant = type_info(np.float64)['nmant'] - for p in range(nmant + 2): # implicit - i = 2**p - 1 - assert int_to_float(i, LD) == LD(i) - assert int_to_float(-i, LD) == LD(-i) - # Above max of float64, we're hosed - nexp64 = floor_log2(type_info(np.float64)['max']) - smn64, smx64 = -(2 ** (nexp64 + 1)), 2 ** (nexp64 + 1) - # The algorithm here implemented goes through float64, so supermax and - # supermin will cause overflow errors - with pytest.raises(OverflowError): - int_to_float(smn64, LD) - with pytest.raises(OverflowError): - int_to_float(smx64, LD) - try: - nmant = type_info(np.longdouble)['nmant'] - except FloatingError: # don't know where to test - return # test we recover precision just above nmant i = 2 ** (nmant + 1) - 1 - assert int(int_to_float(i, LD)) == i - assert int(int_to_float(-i, LD)) == -i + assert int(np.longdouble(i)) == i + assert int(np.longdouble(-i)) == -i # If longdouble can cope with 2**64, test if nmant >= 63: # Check conversion to int; the line below causes an error subtracting # ints / uint64 values, at least for Python 3.3 and numpy dev 1.8 big_int = np.uint64(2**64 - 1) - assert int(int_to_float(big_int, LD)) == big_int + assert int(np.longdouble(big_int)) == big_int def test_int_np_regression(): diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index eaf47774d1..b11a621802 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -18,6 +18,7 @@ '8.0.0', [ ('nibabel.casting', 'as_int'), + ('nibabel.casting', 'int_to_float'), ('nibabel.tmpdirs', 'TemporaryDirectory'), ], ), From 3ed2e6d473060adc7749e237c72eaff5ccb9c547 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 2 Nov 2023 11:19:34 -0400 Subject: [PATCH 461/702] TEST: Add fixture for relaxing digit limits --- nibabel/conftest.py | 17 
+++++++++++++++++ nibabel/tests/test_floating.py | 12 +++++++----- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/nibabel/conftest.py b/nibabel/conftest.py index cf01392324..c5cf96e13f 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -1,3 +1,5 @@ +import sys + import numpy as np import pytest @@ -12,3 +14,18 @@ def legacy_printoptions(): if Version(np.__version__) >= Version('1.22'): np.set_printoptions(legacy='1.21') + + +@pytest.fixture +def max_digits(): + # Set maximum number of digits for int/str conversion for + # duration of a test + try: + orig_max_str_digits = sys.get_int_max_str_digits() + yield sys.set_int_max_str_digits + sys.set_int_max_str_digits(orig_max_str_digits) + except AttributeError: + # Nothing to do for versions of Python that lack these methods + # They were added as DoS protection in Python 3.11 and backported to + # some other versions. + yield lambda x: None diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 2f1342932d..3e6e7f426b 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -172,7 +172,9 @@ def test_floor_exact_64(): assert floor_exact(test_val, np.float64) == 2 ** (e + 1) - int(gap) -def test_floor_exact(): +def test_floor_exact(max_digits): + max_digits(4950) # max longdouble is ~10**4932 + to_test = IEEE_floats + [float] try: type_info(np.longdouble)['nmant'] @@ -188,11 +190,11 @@ def test_floor_exact(): for t in to_test: # A number bigger than the range returns the max info = type_info(t) - assert floor_exact(2**5000, t) == np.inf - assert ceil_exact(2**5000, t) == np.inf + assert floor_exact(10**4933, t) == np.inf + assert ceil_exact(10**4933, t) == np.inf # A number more negative returns -inf - assert floor_exact(-(2**5000), t) == -np.inf - assert ceil_exact(-(2**5000), t) == -np.inf + assert floor_exact(-(10**4933), t) == -np.inf + assert ceil_exact(-(10**4933), t) == -np.inf # Check around end of integer precision nmant = info['nmant'] for i in range(nmant + 1): From bbfd0092c054d026f9fe232100c78d80e7258d8b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 2 Nov 2023 12:41:03 -0400 Subject: [PATCH 462/702] MNT: Add doctest and coverage pragma --- nibabel/casting.py | 5 +++++ nibabel/conftest.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 101e0a0018..f3e04f30f4 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -467,6 +467,11 @@ def int_to_float(val, flt_type): ------- f : numpy scalar of type `flt_type` + + Examples + -------- + >>> int_to_float(1, np.float32) + 1.0 """ return flt_type(val) diff --git a/nibabel/conftest.py b/nibabel/conftest.py index c5cf96e13f..5eba256fa5 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -24,7 +24,7 @@ def max_digits(): orig_max_str_digits = sys.get_int_max_str_digits() yield sys.set_int_max_str_digits sys.set_int_max_str_digits(orig_max_str_digits) - except AttributeError: + except AttributeError: # pragma: no cover # Nothing to do for versions of Python that lack these methods # They were added as DoS protection in Python 3.11 and backported to # some other versions. 
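
The ``max_digits`` fixture above exists because CPython 3.11 and later cap
int/str conversions as a DoS protection (also backported to some earlier
point releases, per the fixture's comment), while ``test_floor_exact`` now
handles integers of more than 4300 digits. A minimal sketch of the interpreter
behaviour the fixture works around (the variable ``big`` is illustrative only;
the snippet assumes CPython 3.11 or later, where
``sys.set_int_max_str_digits`` exists)::

    import sys

    big = 10**4933  # constructing the int itself is unrestricted
    try:
        str(big)  # int -> str conversion is what the cap guards
    except ValueError:
        pass  # the default cap is 4300 digits, so this fires on 3.11+

    sys.set_int_max_str_digits(4950)  # mirrors max_digits(4950) in test_floor_exact
    assert len(str(big)) == 4934  # 10**n has n + 1 digits

Raising the limit per test and restoring the original value afterwards, as the
fixture does, keeps the protection in place for the rest of the suite.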
From 0fb584bc81ffceff810608a22c4fa73872042925 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 26 Oct 2023 20:55:18 +0200 Subject: [PATCH 463/702] MNT: Use raw string to avoid escaping '\' --- tools/gitwash_dumper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py index cabff5c0af..7472658ecd 100755 --- a/tools/gitwash_dumper.py +++ b/tools/gitwash_dumper.py @@ -223,7 +223,7 @@ def main(): out_path, cp_globs=(pjoin('gitwash', '*'),), rep_globs=('*.rst',), - renames=(('\.rst$', options.source_suffix),), + renames=((r'\.rst$', options.source_suffix),), ) make_link_targets( project_name, From 719ecf726d1252139be6d6b5b600c2cae23a6799 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 7 Nov 2023 10:11:58 -0500 Subject: [PATCH 464/702] MNT: Install indexed_gzip on 3.12, add dev to all full,pre groups --- tox.ini | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index cb66b06bef..48a203b0dc 100644 --- a/tox.ini +++ b/tox.ini @@ -88,10 +88,9 @@ deps = py3{8,9}-full-x86,x64: matplotlib >=3.4 # h5py stopped producing win32 wheels at py39 py38-full-x86,x64: h5py >=2.10 - full,pre: pillow >=8.1 - # indexed_gzip missing py312 wheels - py3{8,9,10,11}-{full,pre}: indexed_gzip >=1.4 - full,pre: pyzstd >=0.14.3 + full,pre,dev: pillow >=8.1 + full,pre,dev: indexed_gzip >=1.4 + full,pre,dev: pyzstd >=0.14.3 min: pydicom ==2.1 full,pre,dev: pydicom >=2.1 # pydicom master seems to be breaking things From aaea514e93f83b063b1cd46d917feb6a442f2f4b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 7 Nov 2023 11:06:48 -0500 Subject: [PATCH 465/702] MNT: Better sort of minimal dependencies --- tox.ini | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tox.ini b/tox.ini index 48a203b0dc..cdf7879b2b 100644 --- a/tox.ini +++ b/tox.ini @@ -65,19 +65,21 @@ pass_env = NIPY_EXTRA_TESTS extras = test deps = + # General minimum dependencies: pin based on API usage + min: packaging ==17 + min: importlib_resources ==1.3; python_version < '3.9' # NEP29/SPEC0 + 1yr: Test on minor release series within the last 3 years # We're extending this to all optional dependencies # This only affects the range that we test on; numpy is the only non-optional # dependency, and will be the only one to affect pip environment resolution. 
min: numpy ==1.20 - min: packaging ==17 - min: importlib_resources ==1.3; python_version < '3.9' - min: scipy ==1.6 - min: matplotlib ==3.4 min: h5py ==2.10 - min: pillow ==8.1 min: indexed_gzip ==1.4 + min: matplotlib ==3.4 + min: pillow ==8.1 + min: pydicom ==2.1 min: pyzstd ==0.14.3 + min: scipy ==1.6 # Numpy 2.0 is a major breaking release; we cannot put much effort into # supporting until it's at least RC stable pre: numpy <2.0.dev0 @@ -91,7 +93,6 @@ deps = full,pre,dev: pillow >=8.1 full,pre,dev: indexed_gzip >=1.4 full,pre,dev: pyzstd >=0.14.3 - min: pydicom ==2.1 full,pre,dev: pydicom >=2.1 # pydicom master seems to be breaking things # pre: pydicom @ git+https://github.com/pydicom/pydicom.git@main From bfebbc7d4a873bf473a48d39c12b2bf18c73680b Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 26 Oct 2023 20:40:19 +0200 Subject: [PATCH 466/702] MNT: Use tuples instead of list where possible Suggested by refurb: [FURB109]: Replace `in [x, y, z]` with `in (x, y, z)` --- nibabel/cifti2/cifti2.py | 8 ++++---- nibabel/cifti2/parse_cifti2.py | 8 ++++---- nibabel/cmdline/ls.py | 2 +- nibabel/cmdline/parrec2nii.py | 2 +- nibabel/cmdline/utils.py | 4 ++-- nibabel/freesurfer/io.py | 2 +- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/parse_gifti_fast.py | 2 +- nibabel/streamlines/array_sequence.py | 8 ++++---- nibabel/viewers.py | 2 +- 10 files changed, 20 insertions(+), 20 deletions(-) diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 34aed5a9ed..452bceb7ea 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -968,13 +968,13 @@ def vertex_indices(self, value): def _to_xml_element(self): brain_model = xml.Element('BrainModel') - for key in [ + for key in ( 'IndexOffset', 'IndexCount', 'ModelType', 'BrainStructure', 'SurfaceNumberOfVertices', - ]: + ): attr = _underscore(key) value = getattr(self, attr) if value is not None: @@ -1157,14 +1157,14 @@ def _to_xml_element(self): mat_ind_map = xml.Element('MatrixIndicesMap') dims_as_strings = [str(dim) for dim in self.applies_to_matrix_dimension] mat_ind_map.attrib['AppliesToMatrixDimension'] = ','.join(dims_as_strings) - for key in [ + for key in ( 'IndicesMapToDataType', 'NumberOfSeriesPoints', 'SeriesExponent', 'SeriesStart', 'SeriesStep', 'SeriesUnit', - ]: + ): attr = _underscore(key) value = getattr(self, attr) if value is not None: diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index c7bfb953f9..48c2e06537 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -203,13 +203,13 @@ def StartElementHandler(self, name, attrs): applies_to_matrix_dimension=dimensions, indices_map_to_data_type=attrs['IndicesMapToDataType'], ) - for key, dtype in [ + for key, dtype in ( ('NumberOfSeriesPoints', int), ('SeriesExponent', int), ('SeriesStart', float), ('SeriesStep', float), ('SeriesUnit', str), - ]: + ): if key in attrs: setattr(mim, _underscore(key), dtype(attrs[key])) matrix = self.struct_state[-1] @@ -366,13 +366,13 @@ def StartElementHandler(self, name, attrs): 'BrainModel element can only be a child of a MatrixIndicesMap ' 'with CIFTI_INDEX_TYPE_BRAIN_MODELS type' ) - for key, dtype in [ + for key, dtype in ( ('IndexOffset', int), ('IndexCount', int), ('ModelType', str), ('BrainStructure', str), ('SurfaceNumberOfVertices', int), - ]: + ): if key in attrs: setattr(model, _underscore(key), dtype(attrs[key])) if model.brain_structure not in CIFTI_BRAIN_STRUCTURES: diff --git 
a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 4f504910a2..ff41afbd0a 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -110,7 +110,7 @@ def proc_file(f, opts): if ( hasattr(h, 'has_data_slope') and (h.has_data_slope or h.has_data_intercept) - and not h.get_slope_inter() in [(1.0, 0.0), (None, None)] + and not h.get_slope_inter() in ((1.0, 0.0), (None, None)) ): row += ['@l*%.3g+%.3g' % h.get_slope_inter()] else: diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index c04a6e0196..9340626395 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -414,7 +414,7 @@ def main(): verbose.switch = opts.verbose - if opts.origin not in ['scanner', 'fov']: + if opts.origin not in ('scanner', 'fov'): error(f"Unrecognized value for --origin: '{opts.origin}'.", 1) if opts.dwell_time and opts.field_strength is None: error('Need --field-strength for dwell time calculation', 1) diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index 8e9d45251e..2149235704 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -73,7 +73,7 @@ def table2string(table, out=None): if item.startswith('@'): align = item[1] item = item[2:] - if align not in ['l', 'r', 'c', 'w']: + if align not in ('l', 'r', 'c', 'w'): raise ValueError(f'Unknown alignment {align}. Known are l,r,c') else: align = 'c' @@ -81,7 +81,7 @@ def table2string(table, out=None): nspacesl = max(ceil((col_width[j] - len(item)) / 2.0), 0) nspacesr = max(col_width[j] - nspacesl - len(item), 0) - if align in ['w', 'c']: + if align in ('w', 'c'): pass elif align == 'l': nspacesl, nspacesr = 0, nspacesl + nspacesr diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 95d4eed0f6..b4d6ef2a3a 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -63,7 +63,7 @@ def _read_volume_info(fobj): return volume_info volume_info['head'] = head - for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', 'zras', 'cras']: + for key in ('valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', 'zras', 'cras'): pair = fobj.readline().decode('utf-8').split('=') if pair[0].strip() != key or len(pair) != 2: raise OSError('Error parsing volume info.') diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 16261ee679..76bad4677a 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -228,7 +228,7 @@ def _to_xml_element(self): label = xml.SubElement(labeltable, 'Label') label.attrib['Key'] = str(ele.key) label.text = ele.label - for attr in ['Red', 'Green', 'Blue', 'Alpha']: + for attr in ('Red', 'Green', 'Blue', 'Alpha'): if getattr(ele, attr.lower(), None) is not None: label.attrib[attr] = str(getattr(ele, attr.lower())) return labeltable diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index e4a9be4bd6..7d8eacb825 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -333,7 +333,7 @@ def EndElementHandler(self, name): self.fsm_state.pop() self.coordsys = None - elif name in ['DataSpace', 'TransformedSpace', 'MatrixData', 'Name', 'Value', 'Data']: + elif name in ('DataSpace', 'TransformedSpace', 'MatrixData', 'Name', 'Value', 'Data'): self.write_to = None elif name == 'Label': diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index faa5d2390d..dd9b3c57d0 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -72,7 +72,7 @@ def fn_binary_op(self, value): 
fn.__name__ = op fn.__doc__ = getattr(np.ndarray, op).__doc__ - for op in [ + for op in ( '__add__', '__sub__', '__mul__', @@ -85,14 +85,14 @@ def fn_binary_op(self, value): '__or__', '__and__', '__xor__', - ]: + ): _wrap(cls, op=op, inplace=False) _wrap(cls, op=f"__i{op.strip('_')}__", inplace=True) - for op in ['__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__']: + for op in ('__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__'): _wrap(cls, op) - for op in ['__neg__', '__abs__', '__invert__']: + for op in ('__neg__', '__abs__', '__invert__'): _wrap(cls, op, unary=True) return cls diff --git a/nibabel/viewers.py b/nibabel/viewers.py index f2b32a1fd9..60ebd3a256 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -501,7 +501,7 @@ def _on_keypress(self, event): """Handle mpl keypress events""" if event.key is not None and 'escape' in event.key: self.close() - elif event.key in ['=', '+']: + elif event.key in ('=', '+'): # increment volume index new_idx = min(self._data_idx[3] + 1, self.n_volumes) self._set_volume_index(new_idx, update_slices=True) From c1c38c1a3ae42a9de2420868fa6f6ec98707e761 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 26 Oct 2023 20:43:28 +0200 Subject: [PATCH 467/702] MNT: Use list comprehension instead of calling append() Suggested by refurb: [FURB138]: Consider using list comprehension --- nibabel/nifti1.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 890bc2e228..e0bdd20201 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1636,10 +1636,11 @@ def set_slice_times(self, slice_times): labels = so_recoder.value_set('label') labels.remove('unknown') - matching_labels = [] - for label in labels: - if np.all(st_order == self._slice_time_order(label, n_timed)): - matching_labels.append(label) + matching_labels = [ + label + for label in labels + if np.all(st_order == self._slice_time_order(label, n_timed)) + ] if not matching_labels: raise HeaderDataError(f'slice ordering of {st_order} fits with no known scheme') From 8461ef97c31fbd25c5e63b7a9ccc23e762d5e208 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 8 Nov 2023 18:45:19 +0100 Subject: [PATCH 468/702] =?UTF-8?q?MNT:=20`[:]`=20=E2=86=92=20`.copy()`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Apply a refurb suggestion that does make the intent clearer: [FURB145]: Replace `x[:]` with `x.copy()` --- nibabel/spm2analyze.py | 2 +- nibabel/spm99analyze.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index fff3ecf086..f63785807c 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -11,7 +11,7 @@ from . 
import spm99analyze as spm99 # module import -image_dimension_dtd = spm99.image_dimension_dtd[:] +image_dimension_dtd = spm99.image_dimension_dtd.copy() image_dimension_dtd[image_dimension_dtd.index(('funused2', 'f4'))] = ('scl_inter', 'f4') # Full header numpy dtype combined across sub-fields diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 9c5becc6f6..974f8609cf 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -22,10 +22,10 @@ """ Support subtle variations of SPM version of Analyze """ header_key_dtd = analyze.header_key_dtd # funused1 in dime subfield is scalefactor -image_dimension_dtd = analyze.image_dimension_dtd[:] +image_dimension_dtd = analyze.image_dimension_dtd.copy() image_dimension_dtd[image_dimension_dtd.index(('funused1', 'f4'))] = ('scl_slope', 'f4') # originator text field used as image origin (translations) -data_history_dtd = analyze.data_history_dtd[:] +data_history_dtd = analyze.data_history_dtd.copy() data_history_dtd[data_history_dtd.index(('originator', 'S10'))] = ('origin', 'i2', (5,)) # Full header numpy dtype combined across sub-fields From 6e48dcfe8c6f4918724f6300762bfae1ccfb76b0 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 8 Nov 2023 09:49:28 +0100 Subject: [PATCH 469/702] MNT: do not refer to the optional data packages The rationale is that script `doc/source/devel/register_me.py` is a Python 2 script that is not compatible with Python 3. Looks like the whole machinery has not been used for ages. --- doc/source/devel/data_pkg_design.rst | 298 --------------------------- doc/source/devel/devdiscuss.rst | 1 - doc/source/devel/register_me.py | 47 ----- doc/source/installing_data.rst | 80 ------- 4 files changed, 426 deletions(-) delete mode 100644 doc/source/devel/data_pkg_design.rst delete mode 100644 doc/source/devel/register_me.py delete mode 100644 doc/source/installing_data.rst diff --git a/doc/source/devel/data_pkg_design.rst b/doc/source/devel/data_pkg_design.rst deleted file mode 100644 index eabf2ea7e8..0000000000 --- a/doc/source/devel/data_pkg_design.rst +++ /dev/null @@ -1,298 +0,0 @@ -.. _data-package-design: - -Design of data packages for the nibabel and the nipy suite -========================================================== - -See :ref:`data-package-discuss` for a more general discussion of design -issues. - -When developing or using nipy, many data files can be useful. We divide the -data files nipy uses into at least 3 categories - -#. *test data* - data files required for routine code testing -#. *template data* - data files required for algorithms to function, - such as templates or atlases -#. *example data* - data files for running examples, or optional tests - -Files used for routine testing are typically very small data files. They are -shipped with the software, and live in the code repository. For example, in -the case of ``nipy`` itself, there are some test files that live in the module -path ``nipy.testing.data``. Nibabel ships data files in -``nibabel.tests.data``. See :doc:`add_test_data` for discussion. - -*template data* and *example data* are example of *data packages*. What -follows is a discussion of the design and use of data packages. - -.. 
testsetup:: - - # Make fake data and template directories - import os - from os.path import join as pjoin - import tempfile - tmpdir = tempfile.mkdtemp() - os.environ['NIPY_USER_DIR'] = tmpdir - for subdir in ('data', 'templates'): - files_dir = pjoin(tmpdir, 'nipy', subdir) - os.makedirs(files_dir) - with open(pjoin(files_dir, 'config.ini'), 'wt') as fobj: - fobj.write( - """[DEFAULT] - version = 0.2 - """) - -Use cases for data packages -+++++++++++++++++++++++++++ - -Using the data package -`````````````````````` - -The programmer can use the data like this: - -.. testcode:: - - from nibabel.data import make_datasource - - templates = make_datasource(dict(relpath='nipy/templates')) - fname = templates.get_filename('ICBM152', '2mm', 'T1.nii.gz') - -where ``fname`` will be the absolute path to the template image -``ICBM152/2mm/T1.nii.gz``. - -The programmer can insist on a particular version of a ``datasource``: - ->>> if templates.version < '0.4': -... raise ValueError('Need datasource version at least 0.4') -Traceback (most recent call last): -... -ValueError: Need datasource version at least 0.4 - -If the repository cannot find the data, then: - ->>> make_datasource(dict(relpath='nipy/implausible')) -Traceback (most recent call last): - ... -nibabel.data.DataError: ... - -where ``DataError`` gives a helpful warning about why the data was not -found, and how it should be installed. - -Warnings during installation -```````````````````````````` - -The example data and template data may be important, and so we want to warn -the user if NIPY cannot find either of the two sets of data when installing -the package. Thus:: - - python setup.py install - -will import nipy after installation to check whether these raise an error: - ->>> from nibabel.data import make_datasource ->>> templates = make_datasource(dict(relpath='nipy/templates')) ->>> example_data = make_datasource(dict(relpath='nipy/data')) - -and warn the user accordingly, with some basic instructions for how to -install the data. - -.. _find-data: - -Finding the data -```````````````` - -The routine ``make_datasource`` will look for data packages that have been -installed. For the following call: - ->>> templates = make_datasource(dict(relpath='nipy/templates')) - -the code will: - -#. Get a list of paths where data is known to be stored with - ``nibabel.data.get_data_path()`` -#. For each of these paths, search for directory ``nipy/templates``. If - found, and of the correct format (see below), return a datasource, - otherwise raise an Exception - -The paths collected by ``nibabel.data.get_data_paths()`` are constructed from -':' (Unix) or ';' separated strings. The source of the strings (in the order -in which they will be used in the search above) are: - -#. The value of the ``NIPY_DATA_PATH`` environment variable, if set -#. A section = ``DATA``, parameter = ``path`` entry in a - ``config.ini`` file in ``nipy_dir`` where ``nipy_dir`` is - ``$HOME/.nipy`` or equivalent. -#. Section = ``DATA``, parameter = ``path`` entries in configuration - ``.ini`` files, where the ``.ini`` files are found by - ``glob.glob(os.path.join(etc_dir, '*.ini')`` and ``etc_dir`` is - ``/etc/nipy`` on Unix, and some suitable equivalent on Windows. -#. The result of ``os.path.join(sys.prefix, 'share', 'nipy')`` -#. If ``sys.prefix`` is ``/usr``, we add ``/usr/local/share/nipy``. We - need this because Python >= 2.6 in Debian / Ubuntu does default installs to - ``/usr/local``. -#. 
The result of ``get_nipy_user_dir()`` - -Requirements for a data package -``````````````````````````````` - -To be a valid NIPY project data package, you need to satisfy: - -#. The installer installs the data in some place that can be found using - the method defined in :ref:`find-data`. - -We recommend that: - -#. By default, you install data in a standard location such as - ``/share/nipy`` where ```` is the standard Python - prefix obtained by ``>>> import sys; print sys.prefix`` - -Remember that there is a distinction between the NIPY project - the -umbrella of neuroimaging in python - and the NIPY package - the main -code package in the NIPY project. Thus, if you want to install data -under the NIPY *package* umbrella, your data might go to -``/usr/share/nipy/nipy/packagename`` (on Unix). Note ``nipy`` twice - -once for the project, once for the package. If you want to install data -under - say - the ``pbrain`` package umbrella, that would go in -``/usr/share/nipy/pbrain/packagename``. - -Data package format -``````````````````` - -The following tree is an example of the kind of pattern we would expect -in a data directory, where the ``nipy-data`` and ``nipy-templates`` -packages have been installed:: - - - `-- nipy - |-- data - | |-- config.ini - | `-- placeholder.txt - `-- templates - |-- ICBM152 - | `-- 2mm - | `-- T1.nii.gz - |-- colin27 - | `-- 2mm - | `-- T1.nii.gz - `-- config.ini - -The ```` directory is the directory that will appear somewhere in -the list from ``nibabel.data.get_data_path()``. The ``nipy`` subdirectory -signifies data for the ``nipy`` package (as opposed to other -NIPY-related packages such as ``pbrain``). The ``data`` subdirectory of -``nipy`` contains files from the ``nipy-data`` package. In the -``nipy/data`` or ``nipy/templates`` directories, there is a -``config.ini`` file, that has at least an entry like this:: - - [DEFAULT] - version = 0.2 - -giving the version of the data package. - -.. _data-package-design-install: - -Installing the data -``````````````````` - -We use python distutils to install data packages, and the ``data_files`` -mechanism to install the data. On Unix, with the following command:: - - python setup.py install --prefix=/my/prefix - -data will go to:: - - /my/prefix/share/nipy - -For the example above this will result in these subdirectories:: - - /my/prefix/share/nipy/nipy/data - /my/prefix/share/nipy/nipy/templates - -because ``nipy`` is both the project, and the package to which the data -relates. - -If you install to a particular location, you will need to add that location to -the output of ``nibabel.data.get_data_path()`` using one of the mechanisms -above, for example, in your system configuration:: - - export NIPY_DATA_PATH=/my/prefix/share/nipy - -Packaging for distributions -``````````````````````````` - -For a particular data package - say ``nipy-templates`` - distributions -will want to: - -#. Install the data in set location. The default from ``python setup.py - install`` for the data packages will be ``/usr/share/nipy`` on Unix. -#. Point a system installation of NIPY to these data. - -For the latter, the most obvious route is to copy an ``.ini`` file named for -the data package into the NIPY ``etc_dir``. In this case, on Unix, we will -want a file called ``/etc/nipy/nipy_templates.ini`` with contents:: - - [DATA] - path = /usr/share/nipy - -Current implementation -`````````````````````` - -This section describes how we (the nipy community) implement data packages at -the moment. 
- -The data in the data packages will not usually be under source control. This -is because images don't compress very well, and any change in the data will -result in a large extra storage cost in the repository. If you're pretty -clear that the data files aren't going to change, then a repository could work -OK. - -The data packages will be available at a central release location. For now -this will be: http://nipy.org/data-packages/ . - -A package, such as ``nipy-templates-0.2.tar.gz`` will have the following sort -of structure:: - - - - |-- setup.py - |-- README.txt - |-- MANIFEST.in - `-- templates - |-- ICBM152 - | |-- 1mm - | | `-- T1_brain.nii.gz - | `-- 2mm - | `-- T1.nii.gz - |-- colin27 - | `-- 2mm - | `-- T1.nii.gz - `-- config.ini - - -There should be only one ``nipy/packagename`` directory delivered by a -particular package. For example, this package installs ``nipy/templates``, -but does not contain ``nipy/data``. - -Making a new package tarball is simply: - -#. Downloading and unpacking e.g. ``nipy-templates-0.1.tar.gz`` to form the - directory structure above; -#. Making any changes to the directory; -#. Running ``setup.py sdist`` to recreate the package. - -The process of making a release should be: - -#. Increment the major or minor version number in the ``config.ini`` file; -#. Make a package tarball as above; -#. Upload to distribution site. - -There is an example nipy data package ``nipy-examplepkg`` in the -``examples`` directory of the NIPY repository. - -The machinery for creating and maintaining data packages is available at -https://github.com/nipy/data-packaging. - -See the ``README.txt`` file there for more information. - -.. testcleanup:: - - import shutil - shutil.rmtree(tmpdir) diff --git a/doc/source/devel/devdiscuss.rst b/doc/source/devel/devdiscuss.rst index c864928d60..8383558838 100644 --- a/doc/source/devel/devdiscuss.rst +++ b/doc/source/devel/devdiscuss.rst @@ -21,7 +21,6 @@ progress. 
spm_use modified_images - data_pkg_design data_pkg_discuss data_pkg_uses scaling diff --git a/doc/source/devel/register_me.py b/doc/source/devel/register_me.py deleted file mode 100644 index 017f873abf..0000000000 --- a/doc/source/devel/register_me.py +++ /dev/null @@ -1,47 +0,0 @@ -import configparser as cfp -import sys -from os.path import abspath, dirname, expanduser -from os.path import join as pjoin - -if sys.platform == 'win32': - HOME_INI = pjoin(expanduser('~'), '_dpkg', 'local.dsource') -else: - HOME_INI = pjoin(expanduser('~'), '.dpkg', 'local.dsource') -SYS_INI = pjoin(abspath('etc'), 'dpkg', 'local.dsource') -OUR_PATH = dirname(__file__) -OUR_META = pjoin(OUR_PATH, 'meta.ini') -DISCOVER_INIS = {'user': HOME_INI, 'system': SYS_INI} - - -def main(): - # Get ini file to which to write - try: - reg_to = sys.argv[1] - except IndexError: - reg_to = 'user' - if reg_to in ('user', 'system'): - ini_fname = DISCOVER_INIS[reg_to] - else: # it is an ini file name - ini_fname = reg_to - - # Read parameters for our distribution - meta = cfp.ConfigParser() - files = meta.read(OUR_META) - if len(files) == 0: - raise RuntimeError('Missing meta.ini file') - name = meta.get('DEFAULT', 'name') - version = meta.get('DEFAULT', 'version') - - # Write into ini file - dsource = cfp.ConfigParser() - dsource.read(ini_fname) - if not dsource.has_section(name): - dsource.add_section(name) - dsource.set(name, version, OUR_PATH) - dsource.write(file(ini_fname, 'wt')) - - print(f'Registered package {name}, {version} to {ini_fname}') - - -if __name__ == '__main__': - main() diff --git a/doc/source/installing_data.rst b/doc/source/installing_data.rst deleted file mode 100644 index ce32de2375..0000000000 --- a/doc/source/installing_data.rst +++ /dev/null @@ -1,80 +0,0 @@ -:orphan: - -.. _installing-data: - -Installing data packages -======================== - -nibabel includes some machinery for using optional data packages. We use data -packages for some of the DICOM tests in nibabel. There are also data packages -for standard template images, and other packages for components of nipy, -including the main nipy package. - -For more details on data package design, see :ref:`data-package-design`. - -We haven't yet made a nice automated way of downloading and installing the -packages. For the moment you can find packages for the data and template files -at http://nipy.org/data-packages. - -Data package installation as an administrator ---------------------------------------------- - -The installation procedure, for now, is very basic. For example, let us -say that you want the 'nipy-templates' package at -http://nipy.org/data-packages/nipy-templates-0.1.tar.gz -. You simply download this archive, unpack it, and then run the standard -``python setup.py install`` on it. On a unix system this might look -like:: - - curl -O http://nipy.org/data-packages/nipy-templates-0.1.tar.gz - tar zxvf nipy-templates-0.1.tar.gz - cd nipy-templates-0.1 - sudo python setup.py install - -On windows, download the file, extract the archive to a folder using the -GUI, and then, using the windows shell or similar:: - - cd c:\path\to\extracted\files - python setup.py install - -Non-administrator data package installation -------------------------------------------- - -The commands above assume you are installing into the default system -directories. If you want to install into a custom directory, then (in -python, or ipython, or a text editor) look at the help for -``nipy.utils.data.get_data_path()`` . 
There are instructions there for -pointing your nipy installation to the installed data. - -On unix -~~~~~~~ - -For example, say you installed with:: - - cd nipy-templates-0.1 - python setup.py install --prefix=/home/my-user/some-dir - -Then you may want to do make a file ``~/.nipy/config.ini`` with the -following contents:: - - [DATA] - /home/my-user/some-dir/share/nipy - -On windows -~~~~~~~~~~ - -Say you installed with (windows shell):: - - cd nipy-templates-0.1 - python setup.py install --prefix=c:\some\path - -Then first, find out your home directory:: - - python -c "import os; print os.path.expanduser('~')" - -Let's say that was ``c:\Documents and Settings\My User``. Then, make a -new file called ``c:\Documents and Settings\My User\_nipy\config.ini`` -with contents:: - - [DATA] - c:\some\path\share\nipy From 7155e772c53d28a9fc4ffdbf4640b8ef3867ab3b Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 3 Dec 2023 21:57:59 +0100 Subject: [PATCH 470/702] MNT: remove more stuff about optional data package --- doc/source/devel/data_pkg_uses.rst | 255 ----------------------------- doc/source/devel/devdiscuss.rst | 2 - 2 files changed, 257 deletions(-) delete mode 100644 doc/source/devel/data_pkg_uses.rst diff --git a/doc/source/devel/data_pkg_uses.rst b/doc/source/devel/data_pkg_uses.rst deleted file mode 100644 index 8573e06cb7..0000000000 --- a/doc/source/devel/data_pkg_uses.rst +++ /dev/null @@ -1,255 +0,0 @@ -.. _data-pkg-uses: - -######################################## -Data package usecases and implementation -######################################## - -******** -Usecases -******** - -We are here working from :doc:`data_pkg_discuss` - -Prundles -======== - -See :ref:`prundle`. - -An *local path* format prundle is a directory on the local file system with prundle data stored in files in a -on the local filesystem. - -Examples -======== - -We'll call our package `dang` - data package new generation. - -Create local-path prundle -------------------------- - -:: - - >>> import os - >>> import tempfile - >>> pth = tempfile.mkdtemp() # temporary directory - -Make a pinstance object:: - - >>> from dang import Pinstance - >>> pri = Prundle(name='my-package') - >>> pri.pkg_name - 'my-package' - >>> pri.meta - {} - -Now we make a prundle. First a directory to contain it:: - - >>> import os - >>> import tempfile - >>> pth = tempfile.mkdtemp() # temporary directory - - >>> from dang.prundle import LocalPathPrundle - >>> prun = LocalPathPrundle(pri, pth) - -At the moment there's nothing in the directory. The 'write' method will write -the meta information - here just the package name:: - - >>> prun.write() # writes meta.ini file - >>> os.listdir(pth) - ['meta.ini'] - -The local path prundle data is just the set of files in the temporary directory -named in ``pth`` above. - -Now we've written the package, we can get it by a single call that reads in the -``meta.ini`` file:: - - >>> prun_back = LocalPathPrundle.from_path(pth) - >>> prun_back.pkg_name - 'my-package' - -Getting prundle data --------------------- - -The file-system prundle formats can return content by file names. 
- -For example, for the local path ``prun`` distribution objects we have seen so -far, the following should work:: - - >>> fobj = prun.get_fileobj('a_file.txt') - -In fact, local path distribution objects also have a ``path`` attribute:: - - >>> fname = os.path.join(prun.path, 'a_file.txt') - -The ``path`` attribute might not make sense for objects with greater -abstraction over the file-system - for example objects encapsulating web -content. - -********* -Discovery -********* - -So far, in order to create a prundle object, we have to know where the prundle -is (the path). - -We want to be able to tell the system where prundles are - and the system will -then be able to return a prundle on request - perhaps by package name. The -system here is answering a :ref:`prundle-discovery` query. - -We will then want to ask our packaging system whether it knows about the -prundle we are interested in. - -Discovery sources -================= - -A discovery source is an object that can answer a discovery query. -Specifically, it is an object with a ``discover`` method, like this:: - - >>> import dang - >>> dsrc = dang.get_source('local-system') - >>> dquery_result = dsrc.discover('my-package', version='0') - >>> dquery_result[0].pkg_name - 'my-package' - >>> dquery_result = dsrc.discover('implausible-pkg', version='0') - >>> len(dquery_result) - 0 - -The discovery version number spec may allow comparison operators, as for -``distutils.version.LooseVersion``:: - - >>> res = dsrc.discover(name='my-package', version='>=0') - >>> prun = rst[0] - >>> prun.pkg_name - 'my-package' - >>> prun.meta['version'] - '0' - -Default discovery sources -========================= - -We've used the ``local-system`` discovery source in this call:: - - >>> dsrc = dpkg.get_source('local-system') - -The ``get_source`` function is a convenience function that returns default -discovery sources by name. There are at least two named discovery sources, -``local-system``, and ``local-user``. ``local-system`` is a discovery source -for packages that are installed system-wide (``/usr/share/data`` type -installation in \*nix). ``local-user`` is for packages installed for this user -only (``/home/user/data`` type installations in \*nix). - -Discovery source pools -====================== - -We'll typically have more than one source from which we'd like to query. The -obvious case is where we want to look for both system and local sources. For -this we have a *source pool* which simply returns the first known distribution -from a list of sources. 
Something like this:: - - >>> local_sys = dpkg.get_source('local-system') - >>> local_usr = dpkg.get_source('local-user') - >>> src_pool = dpkg.SourcePool((local_usr, local_sys)) - >>> dq_res = src_pool.discover('my-package', version='0') - >>> dq_res[0].pkg_name - 'my-package' - -We'll often want to do exactly this, so we'll add this source pool to those -that can be returned from our ``get_source`` convenience function:: - - >>> src_pool = dpkg.get_source('local-pool') - -Register a prundle -================== - -In order to register a prundle, we need a prundle object and a -discovery source:: - - >>> from dang.prundle import LocalPathPrundle - >>> prun = LocalPathDistribution.from_path(path=/a/path') - >>> local_usr = dang.get_source('local-user') - >>> local_usr.register(prun) - -Let us then write the source to disk:: - - >>> local_usr.write() - -Now, when we start another process as the same user, we can do this:: - - >>> import dang - >>> local_usr = dang.get_source('local-user') - >>> prun = local_usr.discover('my-package', '0')[0] - -************** -Implementation -************** - -Here are some notes. We had the hope that we could implement something that -would be simple enough that someone using the system would not need our code, -but could work from the specification. - -Local path prundles -=================== - -These are directories accessible on the local filesystem. The directory needs -to give information about the prundle name and optionally, version, tag, -revision id and maybe other metadata. An ``ini`` file is probably enough for -this - something like a ``meta.ini`` file in the directory with:: - - [DEFAULT] - name = my-package - version = 0 - -might be enough to get started. - -Discovery sources -================= - -The discovery source has to be able to return prundle objects for the -prundles it knows about:: - - [my-package] - 0 = /some/path - 0.1 = /another/path - [another-package] - 0 = /further/path - -Registering a package -===================== - -So far we have a local path distribution, that is a directory with some files -in it, and our own ``meta.ini`` file, containing the package name and version. -How does this package register itself to the default sources? Of course, we -could use ``dpkg`` as above:: - - >>> dst = dpkg.LocalPathDistribution.from_path(path='/a/path') - >>> local_usr = dpkg.get_source('local-user') - >>> local_usr.register(dst) - >>> local_usr.save() - -but we wanted to be able to avoid using ``dpkg``. To do this, there might be -a supporting script, in the distribution directory, called ``register_me.py``, -of form given in :download:`register_me.py`. - -Using discovery sources without dpkg -==================================== - -The local discovery sources are ini files, so it would be easy to read and use -these outside the dpkg system, as long as the locations of the ini files are -well defined. Here is the code from ``register_me.py`` defining these files:: - - import os - import sys - - if sys.platform == 'win32': - _home_dpkg_sdir = '_dpkg' - _sys_drive, _ = os.path.splitdrive(sys.prefix) - else: - _home_dpkg_sdir = '.dpkg' - _sys_drive = '/' - # Can we get the user directory? 
- _home = os.path.expanduser('~') - if _home == '~': # if not, the user ini file is undefined - HOME_INI = None - else: - HOME_INI = os.path.join(_home, _home_dpkg_sdir, 'local.dsource') - SYS_INI = os.path.join(_sys_drive, 'etc', 'dpkg', 'local.dsource') diff --git a/doc/source/devel/devdiscuss.rst b/doc/source/devel/devdiscuss.rst index 8383558838..bc23e823c2 100644 --- a/doc/source/devel/devdiscuss.rst +++ b/doc/source/devel/devdiscuss.rst @@ -21,7 +21,5 @@ progress. spm_use modified_images - data_pkg_discuss - data_pkg_uses scaling bv_formats From f5eee8637d2f9ea2d01578f344d1e09fe022311e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 19 Sep 2023 15:12:26 -0400 Subject: [PATCH 471/702] ENH: Add copy() method to ArrayProxy --- nibabel/arrayproxy.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 12a0a7caf3..f123e98d75 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -217,6 +217,15 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non ) self._lock = RLock() + def copy(self): + spec = self._shape, self._dtype, self._offset, self._slope, self._inter + return ArrayProxy( + self.file_like, + spec, + mmap=self._mmap, + keep_file_open=self._keep_file_open, + ) + def __del__(self): """If this ``ArrayProxy`` was created with ``keep_file_open=True``, the open file object is closed if necessary. From 65228f041df0dc63bb20000dcd2a1571e47abc22 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 22 Sep 2023 08:25:35 -0400 Subject: [PATCH 472/702] ENH: Copy lock if filehandle is shared, add tests --- nibabel/arrayproxy.py | 39 ++++++++++++++++++++------------ nibabel/tests/test_arrayproxy.py | 28 +++++++++++++++++++---- 2 files changed, 48 insertions(+), 19 deletions(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index f123e98d75..57d8aa0f8b 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -58,6 +58,7 @@ if ty.TYPE_CHECKING: # pragma: no cover import numpy.typing as npt + from typing_extensions import Self # PY310 # Taken from numpy/__init__.pyi _DType = ty.TypeVar('_DType', bound=np.dtype[ty.Any]) @@ -212,19 +213,29 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non self.order = order # Flags to keep track of whether a single ImageOpener is created, and # whether a single underlying file handle is created. - self._keep_file_open, self._persist_opener = self._should_keep_file_open( - file_like, keep_file_open - ) + self._keep_file_open, self._persist_opener = self._should_keep_file_open(keep_file_open) self._lock = RLock() - def copy(self): + def _has_fh(self) -> bool: + """Determine if our file-like is a filehandle or path""" + return hasattr(self.file_like, 'read') and hasattr(self.file_like, 'seek') + + def copy(self) -> Self: + """Create a new ArrayProxy for the same file and parameters + + If the proxied file is an open file handle, the new ArrayProxy + will share a lock with the old one. 
+ """ spec = self._shape, self._dtype, self._offset, self._slope, self._inter - return ArrayProxy( + new = self.__class__( self.file_like, spec, mmap=self._mmap, keep_file_open=self._keep_file_open, ) + if self._has_fh(): + new._lock = self._lock + return new def __del__(self): """If this ``ArrayProxy`` was created with ``keep_file_open=True``, @@ -245,13 +256,13 @@ def __setstate__(self, state): self.__dict__.update(state) self._lock = RLock() - def _should_keep_file_open(self, file_like, keep_file_open): + def _should_keep_file_open(self, keep_file_open): """Called by ``__init__``. This method determines how to manage ``ImageOpener`` instances, and the underlying file handles - the behaviour depends on: - - whether ``file_like`` is an an open file handle, or a path to a + - whether ``self.file_like`` is an an open file handle, or a path to a ``'.gz'`` file, or a path to a non-gzip file. - whether ``indexed_gzip`` is present (see :attr:`.openers.HAVE_INDEXED_GZIP`). @@ -270,24 +281,24 @@ def _should_keep_file_open(self, file_like, keep_file_open): and closed on each file access. The internal ``_keep_file_open`` flag is only relevant if - ``file_like`` is a ``'.gz'`` file, and the ``indexed_gzip`` library is + ``self.file_like`` is a ``'.gz'`` file, and the ``indexed_gzip`` library is present. This method returns the values to be used for the internal ``_persist_opener`` and ``_keep_file_open`` flags; these values are derived according to the following rules: - 1. If ``file_like`` is a file(-like) object, both flags are set to + 1. If ``self.file_like`` is a file(-like) object, both flags are set to ``False``. 2. If ``keep_file_open`` (as passed to :meth:``__init__``) is ``True``, both internal flags are set to ``True``. - 3. If ``keep_file_open`` is ``False``, but ``file_like`` is not a path + 3. If ``keep_file_open`` is ``False``, but ``self.file_like`` is not a path to a ``.gz`` file or ``indexed_gzip`` is not present, both flags are set to ``False``. - 4. If ``keep_file_open`` is ``False``, ``file_like`` is a path to a + 4. If ``keep_file_open`` is ``False``, ``self.file_like`` is a path to a ``.gz`` file, and ``indexed_gzip`` is present, ``_persist_opener`` is set to ``True``, and ``_keep_file_open`` is set to ``False``. In this case, file handle management is delegated to the @@ -296,8 +307,6 @@ def _should_keep_file_open(self, file_like, keep_file_open): Parameters ---------- - file_like : object - File-like object or filename, as passed to ``__init__``. keep_file_open : { True, False } Flag as passed to ``__init__``. @@ -320,10 +329,10 @@ def _should_keep_file_open(self, file_like, keep_file_open): raise ValueError('keep_file_open must be one of {None, True, False}') # file_like is a handle - keep_file_open is irrelevant - if hasattr(file_like, 'read') and hasattr(file_like, 'seek'): + if self._has_fh(): return False, False # if the file is a gzip file, and we have_indexed_gzip, - have_igzip = openers.HAVE_INDEXED_GZIP and file_like.endswith('.gz') + have_igzip = openers.HAVE_INDEXED_GZIP and self.file_like.endswith('.gz') persist_opener = keep_file_open or have_igzip return keep_file_open, persist_opener diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index e50caa54c9..acf6099859 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -553,16 +553,36 @@ def test_keep_file_open_true_false_invalid(): ArrayProxy(fname, ((10, 10, 10), dtype)) +def islock(l): + # isinstance doesn't work on threading.Lock? 
+ return hasattr(l, 'acquire') and hasattr(l, 'release') + + def test_pickle_lock(): # Test that ArrayProxy can be pickled, and that thread lock is created - def islock(l): - # isinstance doesn't work on threading.Lock? - return hasattr(l, 'acquire') and hasattr(l, 'release') - proxy = ArrayProxy('dummyfile', ((10, 10, 10), np.float32)) assert islock(proxy._lock) pickled = pickle.dumps(proxy) unpickled = pickle.loads(pickled) assert islock(unpickled._lock) assert proxy._lock is not unpickled._lock + + +def test_copy(): + # Test copying array proxies + + # If the file-like is a file name, get a new lock + proxy = ArrayProxy('dummyfile', ((10, 10, 10), np.float32)) + assert islock(proxy._lock) + copied = proxy.copy() + assert islock(copied._lock) + assert proxy._lock is not copied._lock + + # If an open filehandle, the lock should be shared to + # avoid changing filehandle state in critical sections + proxy = ArrayProxy(BytesIO(), ((10, 10, 10), np.float32)) + assert islock(proxy._lock) + copied = proxy.copy() + assert islock(copied._lock) + assert proxy._lock is copied._lock From 1c1845f75c4e2cfacfa4fa8b485adf6b09b650a1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 6 Dec 2023 08:19:19 -0500 Subject: [PATCH 473/702] TEST: Check IndexedGzipFile ArrayProxys are copied properly --- nibabel/tests/test_arrayproxy.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index acf6099859..a207e4ed6d 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -23,7 +23,7 @@ from .. import __version__ from ..arrayproxy import ArrayProxy, get_obj_dtype, is_proxy, reshape_dataobj from ..deprecator import ExpiredDeprecationError -from ..nifti1 import Nifti1Header +from ..nifti1 import Nifti1Header, Nifti1Image from ..openers import ImageOpener from ..testing import memmap_after_ufunc from ..tmpdirs import InTemporaryDirectory @@ -586,3 +586,20 @@ def test_copy(): copied = proxy.copy() assert islock(copied._lock) assert proxy._lock is copied._lock + + +def test_copy_with_indexed_gzip_handle(tmp_path): + indexed_gzip = pytest.importorskip('indexed_gzip') + + spec = ((50, 50, 50, 50), np.float32, 352, 1, 0) + data = np.arange(np.prod(spec[0]), dtype=spec[1]).reshape(spec[0]) + fname = str(tmp_path / 'test.nii.gz') + Nifti1Image(data, np.eye(4)).to_filename(fname) + + with indexed_gzip.IndexedGzipFile(fname) as fobj: + proxy = ArrayProxy(fobj, spec) + copied = proxy.copy() + + assert proxy.file_like is copied.file_like + assert np.array_equal(proxy[0, 0, 0], copied[0, 0, 0]) + assert np.array_equal(proxy[-1, -1, -1], copied[-1, -1, -1]) From 86b2e536c7a5570d4ed76d71b369c4a6f98d8716 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 6 Dec 2023 08:52:06 -0500 Subject: [PATCH 474/702] CI: Add workflow_dispatch trigger to tests --- .github/workflows/test.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 203b5fa3d3..254fc816f4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -19,6 +19,8 @@ on: - maint/* schedule: - cron: '0 0 * * 1' + # Allow job to be triggered manually from GitHub interface + workflow_dispatch: defaults: run: From 07289b7c9f2919d7d99d22a2b6a622a47c44a498 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 15:21:31 -0500 Subject: [PATCH 475/702] TEST: Chdir during doctest to avoid polluting the working dir --- 
nibabel/externals/conftest.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 nibabel/externals/conftest.py diff --git a/nibabel/externals/conftest.py b/nibabel/externals/conftest.py new file mode 100644 index 0000000000..33f88eb323 --- /dev/null +++ b/nibabel/externals/conftest.py @@ -0,0 +1,25 @@ +import pytest + +try: + from contextlib import chdir as _chdir +except ImportError: # PY310 + import os + from contextlib import contextmanager + + @contextmanager # type: ignore + def _chdir(path): + cwd = os.getcwd() + os.chdir(path) + try: + yield + finally: + os.chdir(cwd) + + +@pytest.fixture(autouse=True) +def chdir_tmpdir(request, tmp_path): + if request.node.__class__.__name__ == "DoctestItem": + with _chdir(tmp_path): + yield + else: + yield From 4ddefe5eaaa51f5ee9d65e7fb9933a4bc3358463 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 6 Dec 2023 09:07:21 -0500 Subject: [PATCH 476/702] CI: Enable colored output with FORCE_COLOR --- .github/workflows/test.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 254fc816f4..3acb06d33a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,6 +26,10 @@ defaults: run: shell: bash +# Force tox and pytest to use color +env: + FORCE_COLOR: true + concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true From fb0ca55bf688c26f77afc93abc12e14e62ad3a04 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 6 Dec 2023 09:09:24 -0500 Subject: [PATCH 477/702] CI: Move to trusted publishing for PyPI uploads --- .github/workflows/test.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3acb06d33a..fc9afdc218 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -199,6 +199,9 @@ jobs: runs-on: ubuntu-latest environment: "Package deployment" needs: [test, test-package] + permissions: + # Required for trusted publishing + id-token: write if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: - uses: actions/download-artifact@v3 @@ -206,6 +209,3 @@ jobs: name: dist path: dist/ - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} From 72c3724089c65acc7c4cc2e83b0c8f875ff3bca4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 6 Dec 2023 10:06:54 -0500 Subject: [PATCH 478/702] TOX: Make blue/isort fail on diff --- tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index cdf7879b2b..4363dbf8ed 100644 --- a/tox.ini +++ b/tox.ini @@ -139,8 +139,8 @@ deps = isort[colors] skip_install = true commands = - blue --diff --color nibabel - isort --diff --color nibabel + blue --check --diff --color nibabel + isort --check --diff --color nibabel flake8 nibabel [testenv:style-fix] From 7efda1bcce21625491a0932684c0e38249f6f3cf Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 6 Dec 2023 10:07:22 -0500 Subject: [PATCH 479/702] STY: Apply blue --- nibabel/nifti1.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index e0bdd20201..4cf1e52748 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1637,9 +1637,7 @@ def set_slice_times(self, slice_times): labels.remove('unknown') matching_labels = [ - label - for label in labels - if np.all(st_order == self._slice_time_order(label, n_timed)) + label for 
label in labels if np.all(st_order == self._slice_time_order(label, n_timed)) ] if not matching_labels: From 432407f0546bd186974a68c81420d6520c0642fc Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 6 Dec 2023 11:34:52 -0500 Subject: [PATCH 480/702] TOX: Pass color preferences to tools --- tox.ini | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tox.ini b/tox.ini index 4363dbf8ed..17a66b04e4 100644 --- a/tox.ini +++ b/tox.ini @@ -63,6 +63,12 @@ pass_env = USERNAME # Environment variables we check for NIPY_EXTRA_TESTS + # Pass user color preferences through + PY_COLORS + FORCE_COLOR + NO_COLOR + CLICOLOR + CLICOLOR_FORCE extras = test deps = # General minimum dependencies: pin based on API usage From 89cf1cd3023b4a1e5df9bafff59e2cd9d9e39951 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 15:38:32 -0500 Subject: [PATCH 481/702] TOX: Enable pydicom@master for dev test --- tox.ini | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index 17a66b04e4..d91c136fc1 100644 --- a/tox.ini +++ b/tox.ini @@ -99,9 +99,8 @@ deps = full,pre,dev: pillow >=8.1 full,pre,dev: indexed_gzip >=1.4 full,pre,dev: pyzstd >=0.14.3 - full,pre,dev: pydicom >=2.1 - # pydicom master seems to be breaking things - # pre: pydicom @ git+https://github.com/pydicom/pydicom.git@main + full,pre: pydicom >=2.1 + dev: pydicom @ git+https://github.com/pydicom/pydicom.git@main commands = pytest --doctest-modules --doctest-plus \ From 652edd9530e353a3aa20cf7b58c33b21cd110f58 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 16:02:35 -0500 Subject: [PATCH 482/702] RF: Replace deprecated pydicom.dicomio.read_file with dcmread --- nibabel/nicom/dicomreaders.py | 2 +- nibabel/nicom/dicomwrappers.py | 6 +++--- nibabel/nicom/tests/test_dicomreaders.py | 2 +- nibabel/nicom/tests/test_dicomwrappers.py | 6 +++--- nibabel/pydicom_compat.py | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 113af967cc..5892bb8db2 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -53,7 +53,7 @@ def read_mosaic_dir(dicom_path, globber='*.dcm', check_is_dwi=False, dicom_kwarg If True, raises an error if we don't find DWI information in the DICOM headers. dicom_kwargs : None or dict - Extra keyword arguments to pass to the pydicom ``read_file`` function. + Extra keyword arguments to pass to the pydicom ``dcmread`` function. Returns ------- diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 572957f391..42d4b1413f 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -44,9 +44,9 @@ def wrapper_from_file(file_like, *args, **kwargs): filename string or file-like object, pointing to a valid DICOM file readable by ``pydicom`` \*args : positional - args to ``dicom.read_file`` command. + args to ``dicom.dcmread`` command. \*\*kwargs : keyword - args to ``dicom.read_file`` command. ``force=True`` might be a + args to ``dicom.dcmread`` command. ``force=True`` might be a likely keyword argument. 
Returns @@ -55,7 +55,7 @@ def wrapper_from_file(file_like, *args, **kwargs): DICOM wrapper corresponding to DICOM data type """ with ImageOpener(file_like) as fobj: - dcm_data = pydicom.read_file(fobj, *args, **kwargs) + dcm_data = pydicom.dcmread(fobj, *args, **kwargs) return wrapper_from_data(dcm_data) diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index 1e749aced1..17ea7430f2 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -41,7 +41,7 @@ def test_passing_kwds(): # This should not raise an error data2, aff2, bs2, gs2 = func(IO_DATA_PATH, dwi_glob, dicom_kwargs=dict(force=True)) assert_array_equal(data, data2) - # This should raise an error in pydicom.dicomio.read_file + # This should raise an error in pydicom.filereader.dcmread with pytest.raises(TypeError): func(IO_DATA_PATH, dwi_glob, dicom_kwargs=dict(not_a_parameter=True)) # These are invalid dicoms, so will raise an error unless force=True diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 62076c042a..083357537e 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -23,8 +23,8 @@ DATA_FILE = pjoin(IO_DATA_PATH, 'siemens_dwi_1000.dcm.gz') DATA_FILE_PHILIPS = pjoin(IO_DATA_PATH, 'philips_mprage.dcm.gz') if have_dicom: - DATA = pydicom.read_file(gzip.open(DATA_FILE)) - DATA_PHILIPS = pydicom.read_file(gzip.open(DATA_FILE_PHILIPS)) + DATA = pydicom.dcmread(gzip.open(DATA_FILE)) + DATA_PHILIPS = pydicom.dcmread(gzip.open(DATA_FILE_PHILIPS)) else: DATA = None DATA_PHILIPS = None @@ -170,7 +170,7 @@ def test_wrapper_from_data(): @dicom_test def test_wrapper_args_kwds(): - # Test we can pass args, kwargs to read_file + # Test we can pass args, kwargs to dcmread dcm = didw.wrapper_from_file(DATA_FILE) data = dcm.get_data() # Passing in non-default arg for defer_size diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index 4d9df7df7b..ce6f8fe8c3 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -35,7 +35,7 @@ if have_dicom: # Values not imported by default import pydicom.values # type: ignore - from pydicom.dicomio import read_file # noqa:F401 + from pydicom.dicomio import dcmread as read_file # noqa:F401 from pydicom.sequence import Sequence # noqa:F401 tag_for_keyword = pydicom.datadict.tag_for_keyword From 32d9cd356d2ad540fc28e5d2fd7a03e5cb7889b3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 16:07:27 -0500 Subject: [PATCH 483/702] MNT: Deprecate unused pydicom_compat module --- nibabel/pydicom_compat.py | 8 ++++++++ nibabel/tests/test_removalschedule.py | 1 + 2 files changed, 9 insertions(+) diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index ce6f8fe8c3..fae24e691c 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -21,11 +21,19 @@ """ from __future__ import annotations +import warnings from typing import Callable from .deprecated import deprecate_with_version from .optpkg import optional_package +warnings.warn( + "We will remove the 'pydicom_compat' module from nibabel 7.0. 
" + "Please consult pydicom's documentation for any future needs.", + DeprecationWarning, + stacklevel=2, +) + pydicom, have_dicom, _ = optional_package('pydicom') read_file: Callable | None = None diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index b11a621802..772d395fd4 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -6,6 +6,7 @@ from ..pkg_info import cmp_pkg_version MODULE_SCHEDULE = [ + ('7.0.0', ['nibabel.pydicom_compat']), ('5.0.0', ['nibabel.keywordonly', 'nibabel.py3k']), ('4.0.0', ['nibabel.trackvis']), ('3.0.0', ['nibabel.minc', 'nibabel.checkwarns']), From c367345b98f5ba6e664fbcf30498e7e8f2aa1054 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 16:44:41 -0500 Subject: [PATCH 484/702] FIX: read_file -> dcmread --- nibabel/dft.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/dft.py b/nibabel/dft.py index 7a49d49f52..ee34595b3f 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -238,7 +238,7 @@ def __getattribute__(self, name): return val def dicom(self): - return pydicom.read_file(self.files[0]) + return pydicom.dcmread(self.files[0]) def _get_subdirs(base_dir, files_dict=None, followlinks=False): @@ -347,7 +347,7 @@ def _update_dir(c, dir, files, studies, series, storage_instances): def _update_file(c, path, fname, studies, series, storage_instances): try: - do = pydicom.read_file(f'{path}/{fname}') + do = pydicom.dcmread(f'{path}/{fname}') except pydicom.filereader.InvalidDicomError: logger.debug(' not a DICOM file') return None From ced2b81383d49c38ec293edb82c415b50ae5b3fb Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 10:32:21 -0500 Subject: [PATCH 485/702] MNT: Update requirements --- doc-requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc-requirements.txt b/doc-requirements.txt index 64830ca962..42400ea57d 100644 --- a/doc-requirements.txt +++ b/doc-requirements.txt @@ -1,7 +1,7 @@ # Auto-generated by tools/update_requirements.py -r requirements.txt -matplotlib >= 1.5.3 +sphinx +matplotlib>=1.5.3 numpydoc -sphinx ~= 5.3 texext -tomli; python_version < "3.11" +tomli; python_version < '3.11' From 76d202eb02441e1cf59c7533e3f9b1dd8b1c14aa Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 10:55:45 -0500 Subject: [PATCH 486/702] MNT: Update and simplify mailmap --- .mailmap | 104 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 56 insertions(+), 48 deletions(-) diff --git a/.mailmap b/.mailmap index 80c46f385e..7b5dfa0d43 100644 --- a/.mailmap +++ b/.mailmap @@ -1,79 +1,87 @@ # Prevent git from showing duplicate names with commands like "git shortlog" # See the manpage of git-shortlog for details. # The syntax is: -# Name that should be used Bad name # -# You can skip Bad name if it is the same as the one that should be used, and is unique. +# Good Name [[Bad Name] ] +# +# If multiple names are mapped to the good email, a line without any bad +# emails will consolidate these names. +# Likewise, any name mapped to a bad email will be converted to the good name. +# +# A contributor with three emails and inconsistent names could be mapped like this: +# +# Good Name +# Good Name +# Good Name +# +# If a contributor uses an email that is not unique to them, you will need their +# name. +# +# Good Name +# Good Name Good Name # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
-Alexandre Gramfort Alexandre Gramfort +Alexandre Gramfort Anibal Sólon -Ariel Rokem arokem -B. Nolan Nichols Nolan Nichols -Basile Pinsard bpinsard -Basile Pinsard bpinsard -Ben Cipollini Ben Cipollini +Ariel Rokem +B. Nolan Nichols +Basile Pinsard +Basile Pinsard +Ben Cipollini Benjamin C Darwin -Bertrand Thirion bthirion +Bertrand Thirion Cameron Riddell <31414128+CRiddler@users.noreply.github.com> -Christian Haselgrove Christian Haselgrove -Christopher J. Markiewicz Chris Johnson -Christopher J. Markiewicz Chris Markiewicz -Christopher J. Markiewicz Chris Markiewicz -Christopher J. Markiewicz Christopher J. Markiewicz -Christopher J. Markiewicz Christopher J. Markiewicz -Cindee Madison CindeeM -Cindee Madison cindeem -Demian Wassermann Demian Wassermann +Christian Haselgrove +Christopher J. Markiewicz +Christopher J. Markiewicz +Christopher J. Markiewicz +Cindee Madison +Demian Wassermann Dimitri Papadopoulos Orfanos Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> -Eric Larson Eric89GXL -Eric Larson larsoner +Eric Larson Fabian Perez -Fernando Pérez-García Fernando -Félix C. Morency Felix C. Morency -Félix C. Morency Félix C. Morency -Gael Varoquaux GaelVaroquaux -Gregory R. Lee Gregory R. Lee -Ian Nimmo-Smith Ian Nimmo-Smith -Jaakko Leppäkangas jaeilepp -Jacob Roberts +Fernando Pérez-García +Félix C. Morency +Gael Varoquaux +Gregory R. Lee +Ian Nimmo-Smith +Jaakko Leppäkangas Jacob Roberts -Jakub Kaczmarzyk Jakub Kaczmarzyk -Jasper J.F. van den Bosch Jasper -Jean-Baptiste Poline jbpoline +Jasper J.F. van den Bosch +Jean-Baptiste Poline Jérôme Dockès -Jon Haitz Legarreta Jon Haitz Legarreta Gorroño -Jonathan Daniel +Jon Haitz Legarreta Jonathan Daniel <36337649+jond01@users.noreply.github.com> -Kesshi Jordan kesshijordan -Kevin S. Hahn Kevin S. Hahn -Konstantinos Raktivan constracti -Krish Subramaniam Krish Subramaniam +Kesshi Jordan +Kevin S. Hahn +Konstantinos Raktivan +Krish Subramaniam Krzysztof J. Gorgolewski Krzysztof J. Gorgolewski -Marc-Alexandre Côté Marc-Alexandre Cote +Marc-Alexandre Côté Mathias Goncalves Mathias Goncalves -Matthew Cieslak Matt Cieslak +Mathieu Scheltienne +Matthew Cieslak Michael Hanke Michael Hanke -Michiel Cottaar Michiel Cottaar Michiel Cottaar -Ly Nguyen lxn2 -Oliver P. Hinds ohinds +Ly Nguyen +Oliver P. Hinds Or Duek Oscar Esteban -Paul McCarthy Paul McCarthy +Paul McCarthy +Reinder Vos de Wael Roberto Guidotti Roberto Guidotti -Satrajit Ghosh Satrajit Ghosh -Serge Koudoro skoudoro +Satrajit Ghosh +Serge Koudoro Stephan Gerhard Stephan Gerhard -Thomas Roos Roosted7 -Venkateswara Reddy Reddam R3DDY97 +Thomas Roos +Venkateswara Reddy Reddam +Yaroslav O. Halchenko Yaroslav O. Halchenko -Yaroslav O. 
Halchenko Yaroslav Halchenko From e6c9d74b0bf47f704e5b9b3d8840c418cbee1930 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 10:57:59 -0500 Subject: [PATCH 487/702] MNT: Update Zenodo ordering --- .zenodo.json | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index b96c102349..6cadd84a7a 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -38,6 +38,11 @@ "name": "Cheng, Christopher P.", "orcid": "0000-0001-9112-9464" }, + { + "affiliation": "University of Washington: Seattle, WA, United States", + "name": "Larson, Eric", + "orcid": "0000-0003-4782-5360" + }, { "affiliation": "Dartmouth College: Hanover, NH, United States", "name": "Halchenko, Yaroslav O.", @@ -48,11 +53,6 @@ "name": "Cottaar, Michiel", "orcid": "0000-0003-4679-7724" }, - { - "affiliation": "University of Washington: Seattle, WA, United States", - "name": "Larson, Eric", - "orcid": "0000-0003-4782-5360" - }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", @@ -81,6 +81,11 @@ "name": "Wang, Hao-Ting", "orcid": "0000-0003-4078-2038" }, + { + "affiliation": "CEA", + "name": "Papadopoulos Orfanos, Dimitri", + "orcid": "0000-0002-1242-8990" + }, { "affiliation": "Harvard University - Psychology", "name": "Kastman, Erik", @@ -108,12 +113,12 @@ "orcid": "0000-0003-0679-1985" }, { - "name": "Madison, Cindee" + "affiliation": "Human Neuroscience Platform, Fondation Campus Biotech Geneva, Geneva, Switzerland", + "name": "Mathieu Scheltienne", + "orcid": "0000-0001-8316-7436" }, { - "affiliation": "CEA", - "name": "Papadopoulos Orfanos, Dimitri", - "orcid": "0000-0002-1242-8990" + "name": "Madison, Cindee" }, { "name": "S\u00f3lon, Anibal" @@ -187,6 +192,9 @@ "name": "Klug, Julian", "orcid": "0000-0002-4849-9811" }, + { + "name": "Vos de Wael, Reinder" + }, { "affiliation": "SRI International", "name": "Nichols, B. 
Nolan", @@ -238,6 +246,9 @@ { "name": "Nguyen, Ly" }, + { + "name": "Suter, Peter" + }, { "affiliation": "BrainSpec, Boston, MA", "name": "Reddigari, Samir", @@ -277,6 +288,9 @@ { "name": "Fauber, Bennet" }, + { + "name": "Dewey, Blake" + }, { "name": "Perez, Fabian" }, @@ -377,15 +391,7 @@ }, { "name": "freec84" - }, - { - "name": "Suter, Peter" } - { - "affiliation": "Human Neuroscience Platform, Fondation Campus Biotech Geneva, Geneva, Switzerland", - "name": "Mathieu Scheltienne", - "orcid": "0000-0001-8316-7436" - }, ], "keywords": [ "neuroimaging" From b77f1663826f4fbc3dc8352480c25a952feeccf6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 11:01:30 -0500 Subject: [PATCH 488/702] DOC: Add new contributors, insert old contributor --- doc/source/index.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/index.rst b/doc/source/index.rst index 65e1aded4c..72c731d25f 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -55,6 +55,7 @@ contributed code and discussion (in rough order of appearance): * JB Poline * Basile Pinsard * `Satrajit Ghosh`_ +* Eric Larson * `Nolan Nichols`_ * Ly Nguyen * Philippe Gervais @@ -126,6 +127,9 @@ contributed code and discussion (in rough order of appearance): * Horea Christian * Fabian Perez * Mathieu Scheltienne +* Reinder Vos de Wael +* Peter Suter +* Blake Dewey License reprise =============== From bd8d118b8bf14c47d32c1248c95d01a843cda1d5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 14:50:37 -0500 Subject: [PATCH 489/702] MNT: Remove 3.12rc1 workaround for python/cpython#180111 --- nibabel/openers.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/nibabel/openers.py b/nibabel/openers.py index 9a024680a2..90c7774d12 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -78,12 +78,6 @@ def __init__( mtime=mtime, ) - def seek(self, pos: int, whence: int = 0, /) -> int: - # Work around bug (gh-180111) in Python 3.12rc1, where seeking without - # flushing can cause write of excess null bytes - self.flush() - return super().seek(pos, whence) - def _gzip_open( filename: str, From 3299fc1e271fe3b137ebbb32cd9c7bfd8cce8ea4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 3 Dec 2023 14:54:38 -0500 Subject: [PATCH 490/702] MNT: Update README --- README.rst | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/README.rst b/README.rst index 77d6f55311..2043c1d220 100644 --- a/README.rst +++ b/README.rst @@ -123,6 +123,27 @@ For more information on previous releases, see the `release archive`_ or .. _release archive: https://github.com/nipy/NiBabel/releases .. _development changelog: https://nipy.org/nibabel/changelog.html +Testing +======= + +During development, we recommend using tox_ to run nibabel tests:: + + git clone https://github.com/nipy/nibabel.git + cd nibabel + tox + +To test an installed version of nibabel, install the test dependencies +and run pytest_:: + + pip install nibabel[test] + pytest --pyargs nibabel + +For more information, consult the `developer guidelines`_. + +.. _tox: https://tox.wiki +.. _pytest: https://docs.pytest.org +.. 
_developer guidelines: https://nipy.org/nibabel/devel/devguide.html + Mailing List ============ From 70795b063c48c2a04edbfcb2e97d5429b4bc31c3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 11 Dec 2023 14:48:25 -0500 Subject: [PATCH 491/702] DOC: 5.2.0 changelog --- Changelog | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/Changelog b/Changelog index cb30decc64..06cbf74fdf 100644 --- a/Changelog +++ b/Changelog @@ -25,6 +25,79 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. +5.2.0 (Monday 11 December 2023) +=============================== + +New feature release in the 5.2.x series. + +This release requires a minimum Python of 3.8 and NumPy 1.20, and has been +tested up to Python 3.12 and NumPy 1.26. + +New features +------------ +* Add generic :class:`~nibabel.pointset.Pointset` and regularly spaced + :class:`~nibabel.pointset.NDGrid` data structures in preparation for coordinate + transformation and resampling (pr/1251) (CM, reviewed by Oscar Esteban) + +Enhancements +------------ +* Add :meth:`~nibabel.arrayproxy.ArrayProxy.copy` method to + :class:`~nibabel.arrayproxy.ArrayProxy` (pr/1255) (CM, reviewed by Paul McCarthy) +* Permit :meth:`~nibabel.xmlutils.XmlSerializable.to_xml` to pass keyword + arguments to :meth:`~xml.etree.ElementTree.ElementTree.tostring` (pr/1258) + (CM) +* Allow user expansion (e.g., ``~/...``) in strings passed to functions that + accept paths (pr/1260) (Reinder Vos de Wael, reviewed by CM) +* Expand CIFTI-2 brain structures to permit synonyms (pr/1256) (CM, reviewed + by Mathias Goncalves) +* Annotate :class:`~nibabel.spatialimages.SpatialImage` as accepting + ``affine=None`` argument (pr/1253) (Blake Dewey, reviewed by CM) +* Warn on invalid MINC2 spacing declarations, treat as missing (pr/1237) + (Peter Suter, reviewed by CM) +* Refactor :func:`~nibabel.nicom.utils.find_private_element` for improved + readability and maintainability (pr/1228) (MB, reviewed by CM) + +Bug fixes +--------- +* Resolve test failure related to randomly generated invalid case (pr/1221) (CM) + +Documentation +------------- +* Remove references to NiPy data packages from documentation (pr/1275) + (Dimitri Papadopoulos, reviewed by CM, MB) + +Maintenance +----------- +* Quality of life improvements for CI, including color output and OIDC publishing + (pr/1282) (CM) +* Patch for NumPy 2.0 pre-release compatibility (pr/1250) (Mathieu + Scheltienne and EL, reviewed by CM) +* Add spellchecking to tox, CI and pre-commit (pr/1266) (CM) +* Add py312-dev-x64 environment to Tox to test NumPy 2.0 pre-release + compatibility (pr/1267) (CM, reviewed by EL) +* Resurrect tox configuration to cover development workflows and CI checks + (pr/1262) (CM) +* Updates for Python 3.12 support (pr/1247, pr/1261, pr/1273) (CM) +* Remove uses of deprecated ``numpy.compat.py3k`` module (pr/1243) (Eric + Larson, reviewed by CM) +* Various fixes for typos and style issues detected by Codespell, pyupgrade and + refurb (pr/1263, pr/1269, pr/1270, pr/1271, pr/1276) (Dimitri Papadopoulos, + reviewed by CM) +* Use stable argsorts in PARREC tests to ensure consistent behavior on systems + with AVX512 SIMD instructions and numpy 1.25 (pr/1234) (CM) +* Resolve CodeCov submission failures (pr/1224) (CM) +* Link to logo with full URL to avoid broken links in PyPI (pr/1218) (CM, + reviewed by Zvi Baratz) + +API changes and deprecations 
+---------------------------- +* The :mod:`nibabel.pydicom_compat` module is deprecated and will be removed + in NiBabel 7.0. (pr/1280) +* The :func:`~nibabel.casting.int_to_float` and :func:`~nibabel.casting.as_int` + functions are no longer needed to work around NumPy deficiencies and have been + deprecated (pr/1272) (CM, reviewed by EL) + + 5.1.0 (Monday 3 April 2023) =========================== From 8bc1af450f92d3bb4105d11f89397b8e87c6b298 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 09:36:36 -0500 Subject: [PATCH 492/702] DOC: Fix references in changelog --- Changelog | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Changelog b/Changelog index 06cbf74fdf..cd3c2b005b 100644 --- a/Changelog +++ b/Changelog @@ -36,7 +36,7 @@ tested up to Python 3.12 and NumPy 1.26. New features ------------ * Add generic :class:`~nibabel.pointset.Pointset` and regularly spaced - :class:`~nibabel.pointset.NDGrid` data structures in preparation for coordinate + :class:`~nibabel.pointset.Grid` data structures in preparation for coordinate transformation and resampling (pr/1251) (CM, reviewed by Oscar Esteban) Enhancements @@ -44,7 +44,7 @@ Enhancements * Add :meth:`~nibabel.arrayproxy.ArrayProxy.copy` method to :class:`~nibabel.arrayproxy.ArrayProxy` (pr/1255) (CM, reviewed by Paul McCarthy) * Permit :meth:`~nibabel.xmlutils.XmlSerializable.to_xml` to pass keyword - arguments to :meth:`~xml.etree.ElementTree.ElementTree.tostring` (pr/1258) + arguments to :meth:`~xml.etree.ElementTree.tostring` (pr/1258) (CM) * Allow user expansion (e.g., ``~/...``) in strings passed to functions that accept paths (pr/1260) (Reinder Vos de Wael, reviewed by CM) @@ -54,7 +54,7 @@ Enhancements ``affine=None`` argument (pr/1253) (Blake Dewey, reviewed by CM) * Warn on invalid MINC2 spacing declarations, treat as missing (pr/1237) (Peter Suter, reviewed by CM) -* Refactor :func:`~nibabel.nicom.utils.find_private_element` for improved +* Refactor :func:`~nibabel.nicom.utils.find_private_section` for improved readability and maintainability (pr/1228) (MB, reviewed by CM) Bug fixes From c9e7795306f7dd6912d6502318129c1dc8056397 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 09:37:39 -0500 Subject: [PATCH 493/702] MNT: Add tool for generating GitHub-friendly release notes --- tools/markdown_release_notes.py | 94 +++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 tools/markdown_release_notes.py diff --git a/tools/markdown_release_notes.py b/tools/markdown_release_notes.py new file mode 100644 index 0000000000..66e7876036 --- /dev/null +++ b/tools/markdown_release_notes.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +import re +import sys +from pathlib import Path + +CHANGELOG = Path(__file__).parent.parent / 'Changelog' + +# Match release lines like "5.2.0 (Monday 11 December 2023)" +RELEASE_REGEX = re.compile(r"""((?:\d+)\.(?:\d+)\.(?:\d+)) \(\w+ \d{1,2} \w+ \d{4}\)$""") + + +def main(): + version = sys.argv[1] + output = sys.argv[2] + if output == '-': + output = sys.stdout + else: + output = open(output, 'w') + + release_notes = [] + in_release_notes = False + + with open(CHANGELOG) as f: + for line in f: + match = RELEASE_REGEX.match(line) + if match: + if in_release_notes: + break + in_release_notes = match.group(1) == version + next(f) # Skip the underline + continue + + if in_release_notes: + release_notes.append(line) + + # Drop empty lines at start and end + while release_notes and not release_notes[0].strip(): + 
release_notes.pop(0) + while release_notes and not release_notes[-1].strip(): + release_notes.pop() + + # Join lines + release_notes = ''.join(release_notes) + + # Remove line breaks when they are followed by a space + release_notes = re.sub(r'\n +', ' ', release_notes) + + # Replace pr/ with # for GitHub + release_notes = re.sub(r'\(pr/(\d+)\)', r'(#\1)', release_notes) + + # Replace :mod:`package.X` with [package.X](...) + release_notes = re.sub( + r':mod:`nibabel\.(.*)`', + r'[nibabel.\1](https://nipy.org/nibabel/reference/nibabel.\1.html)', + release_notes, + ) + # Replace :class/func/attr:`package.module.X` with [package.module.X](...) + release_notes = re.sub( + r':(?:class|func|attr):`(nibabel\.\w*)(\.[\w.]*)?\.(\w+)`', + r'[\1\2.\3](https://nipy.org/nibabel/reference/\1.html#\1\2.\3)', + release_notes, + ) + release_notes = re.sub( + r':(?:class|func|attr):`~(nibabel\.\w*)(\.[\w.]*)?\.(\w+)`', + r'[\3](https://nipy.org/nibabel/reference/\1.html#\1\2.\3)', + release_notes, + ) + # Replace :meth:`package.module.class.X` with [package.module.class.X](...) + release_notes = re.sub( + r':meth:`(nibabel\.[\w.]*)\.(\w+)\.(\w+)`', + r'[\1.\2.\3](https://nipy.org/nibabel/reference/\1.html#\1.\2.\3)', + release_notes, + ) + release_notes = re.sub( + r':meth:`~(nibabel\.[\w.]*)\.(\w+)\.(\w+)`', + r'[\3](https://nipy.org/nibabel/reference/\1.html#\1.\2.\3)', + release_notes, + ) + + def python_doc(match): + module = match.group(1) + name = match.group(2) + return f'[{name}](https://docs.python.org/3/library/{module.lower()}.html#{module}.{name})' + + release_notes = re.sub(r':meth:`~([\w.]+)\.(\w+)`', python_doc, release_notes) + + output.write('## Release notes\n\n') + output.write(release_notes) + + output.close() + + +if __name__ == '__main__': + main() From 33363bfa49ce3b2417ed0d5b456a0b919571185d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 09:48:41 -0500 Subject: [PATCH 494/702] MNT: Avoid isort version with broken extras --- tox.ini | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index d91c136fc1..cc2b263cb1 100644 --- a/tox.ini +++ b/tox.ini @@ -141,7 +141,8 @@ labels = check deps = flake8 blue - isort[colors] + # Broken extras, remove when fix is released + isort[colors]!=5.13.1 skip_install = true commands = blue --check --diff --color nibabel @@ -153,7 +154,7 @@ description = Auto-apply style guide to the extent possible labels = pre-release deps = blue - isort[colors] + isort skip_install = true commands = blue nibabel From 773e3c40eebf072630abbc26a30d3ad67adf5e90 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 10:54:07 -0500 Subject: [PATCH 495/702] DOC: Fix intersphinx mapping and reference type --- Changelog | 4 ++-- doc/source/conf.py | 7 ++++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Changelog b/Changelog index cd3c2b005b..10afc42df8 100644 --- a/Changelog +++ b/Changelog @@ -43,8 +43,8 @@ Enhancements ------------ * Add :meth:`~nibabel.arrayproxy.ArrayProxy.copy` method to :class:`~nibabel.arrayproxy.ArrayProxy` (pr/1255) (CM, reviewed by Paul McCarthy) -* Permit :meth:`~nibabel.xmlutils.XmlSerializable.to_xml` to pass keyword - arguments to :meth:`~xml.etree.ElementTree.tostring` (pr/1258) +* Permit :meth:`~nibabel.xmlutils.XmlSerializable.to_xml` methods to pass keyword + arguments to :func:`xml.etree.ElementTree.tostring` (pr/1258) (CM) * Allow user expansion (e.g., ``~/...``) in strings passed to functions that accept paths (pr/1260) (Reinder Vos de Wael, 
reviewed by CM) diff --git a/doc/source/conf.py b/doc/source/conf.py index 82fe25adac..175c6340bd 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -280,7 +280,12 @@ # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/3/': None} +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable', None), + 'scipy': ('https://docs.scipy.org/doc/scipy', None), + 'matplotlib': ('https://matplotlib.org/stable', None), +} # Config of plot_directive plot_include_source = True From f7b9bc4c89f9bfb9e31763e3b2d672016d6d8f33 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 11:13:10 -0500 Subject: [PATCH 496/702] MNT: Advertise Python 3.12 support --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 50905dff56..9fec3975cc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering", ] # Version from setuptools_scm From 46a765d162239e131c4db7d573f9bf9a05b3c3f1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 21:55:20 -0500 Subject: [PATCH 497/702] FIX: Tolerate missing git Closes gh-1285. --- nibabel/pkg_info.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 7e816939d5..7232806a0a 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -1,6 +1,7 @@ from __future__ import annotations import sys +from contextlib import suppress from subprocess import run from packaging.version import Version @@ -102,14 +103,16 @@ def pkg_commit_hash(pkg_path: str | None = None) -> tuple[str, str]: ver = Version(__version__) if ver.local is not None and ver.local.startswith('g'): return 'installation', ver.local[1:8] - # maybe we are in a repository - proc = run( - ('git', 'rev-parse', '--short', 'HEAD'), - capture_output=True, - cwd=pkg_path, - ) - if proc.stdout: - return 'repository', proc.stdout.decode().strip() + # maybe we are in a repository, but consider that we may not have git + with suppress(FileNotFoundError): + proc = run( + ('git', 'rev-parse', '--short', 'HEAD'), + capture_output=True, + cwd=pkg_path, + ) + if proc.stdout: + return 'repository', proc.stdout.decode().strip() + return '(none found)', '' From 1ec84885bc40ea459252fb74e45945f25bd804f1 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 26 Dec 2023 00:12:12 +0100 Subject: [PATCH 498/702] MNT: Apply Repo-Review suggestions --- nibabel/_compression.py | 4 ++-- nibabel/benchmarks/bench_arrayproxy_slicing.py | 2 +- nibabel/cmdline/dicomfs.py | 2 +- nibabel/externals/conftest.py | 2 +- nibabel/minc2.py | 2 +- nibabel/parrec.py | 2 +- nibabel/pydicom_compat.py | 2 +- nibabel/spm99analyze.py | 2 +- nibabel/tmpdirs.py | 2 +- nibabel/xmlutils.py | 2 +- pyproject.toml | 4 +++- 11 files changed, 14 insertions(+), 12 deletions(-) diff --git a/nibabel/_compression.py b/nibabel/_compression.py index bf13895c80..75a5e3bbf4 100644 --- a/nibabel/_compression.py +++ b/nibabel/_compression.py @@ -17,7 +17,7 @@ from .optpkg import optional_package if ty.TYPE_CHECKING: # pragma: no cover - import indexed_gzip # type: ignore + import indexed_gzip # type: 
ignore[import-not-found] import pyzstd HAVE_INDEXED_GZIP = True @@ -40,7 +40,7 @@ if HAVE_INDEXED_GZIP: COMPRESSED_FILE_LIKES += (indexed_gzip.IndexedGzipFile,) COMPRESSION_ERRORS += (indexed_gzip.ZranError,) - from indexed_gzip import IndexedGzipFile # type: ignore + from indexed_gzip import IndexedGzipFile # type: ignore[import-not-found] else: IndexedGzipFile = gzip.GzipFile diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 958923d7ea..dc9acfdedd 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -26,7 +26,7 @@ # if memory_profiler is installed, we get memory usage results try: - from memory_profiler import memory_usage # type: ignore + from memory_profiler import memory_usage # type: ignore[import-not-found] except ImportError: memory_usage = None diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index 85d7d8dcad..dec4011c51 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -25,7 +25,7 @@ class dummy_fuse: try: - import fuse # type: ignore + import fuse # type: ignore[import-not-found] uid = os.getuid() gid = os.getgid() diff --git a/nibabel/externals/conftest.py b/nibabel/externals/conftest.py index 33f88eb323..472f2f0296 100644 --- a/nibabel/externals/conftest.py +++ b/nibabel/externals/conftest.py @@ -6,7 +6,7 @@ import os from contextlib import contextmanager - @contextmanager # type: ignore + @contextmanager # type: ignore[no-redef] def _chdir(path): cwd = os.getcwd() os.chdir(path) diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 3096ef9499..94e1be76e2 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -163,7 +163,7 @@ class Minc2Image(Minc1Image): def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # Import of h5py might take awhile for MPI-enabled builds # So we are importing it here "on demand" - import h5py # type: ignore + import h5py # type: ignore[import-not-found] holder = file_map['image'] if holder.filename is None: diff --git a/nibabel/parrec.py b/nibabel/parrec.py index ec3fdea711..3a8a6030de 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -1338,7 +1338,7 @@ def from_filename( strict_sort=strict_sort, ) - load = from_filename # type: ignore + load = from_filename # type: ignore[assignment] load = PARRECImage.from_filename diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index fae24e691c..d61c880117 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -42,7 +42,7 @@ if have_dicom: # Values not imported by default - import pydicom.values # type: ignore + import pydicom.values # type: ignore[import-not-found] from pydicom.dicomio import dcmread as read_file # noqa:F401 from pydicom.sequence import Sequence # noqa:F401 diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 974f8609cf..c859d702f4 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -275,7 +275,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): contents = matf.read() if len(contents) == 0: return ret - import scipy.io as sio # type: ignore + import scipy.io as sio # type: ignore[import-not-found] mats = sio.loadmat(BytesIO(contents)) if 'mat' in mats: # this overrides a 'M', and includes any flip diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 7fe47e6510..49d69d2bf2 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -15,7 +15,7 @@ from contextlib import chdir as _chdir except 
ImportError: # PY310 - @contextmanager # type: ignore + @contextmanager # type: ignore[no-redef] def _chdir(path): cwd = os.getcwd() os.chdir(path) diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 4a5fb28979..d3a7a08309 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -32,7 +32,7 @@ def to_xml(self, enc='utf-8', **kwargs) -> bytes: Additional keyword arguments to :func:`xml.etree.ElementTree.tostring`. """ ele = self._to_xml_element() - return b'' if ele is None else tostring(ele, enc, **kwargs) + return tostring(ele, enc, **kwargs) class XmlBasedHeader(FileBasedHeader, XmlSerializable): diff --git a/pyproject.toml b/pyproject.toml index 9fec3975cc..14095b8f22 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -111,7 +111,7 @@ __version_tuple__ = version_tuple = {version_tuple!r} [tool.blue] line_length = 99 -target-version = ["py37"] +target-version = ["py38"] force-exclude = """ ( _version.py @@ -130,6 +130,8 @@ python_version = "3.11" exclude = [ "/tests", ] +warn_unreachable = true +enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] [tool.codespell] skip = "*/data/*,./nibabel-data" From cff32bbcc2c32defe176aebb00150331a18ed3c3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 26 Dec 2023 07:34:24 -0600 Subject: [PATCH 499/702] MNT: Purge defunct nisext package --- .coveragerc | 4 +- Makefile | 21 +- nisext/__init__.py | 13 - nisext/py3builder.py | 38 --- nisext/sexts.py | 285 ------------------- nisext/testers.py | 523 ----------------------------------- nisext/tests/__init__.py | 1 - nisext/tests/test_sexts.py | 106 ------- nisext/tests/test_testers.py | 35 --- pyproject.toml | 2 +- 10 files changed, 4 insertions(+), 1024 deletions(-) delete mode 100644 nisext/__init__.py delete mode 100644 nisext/py3builder.py delete mode 100644 nisext/sexts.py delete mode 100644 nisext/testers.py delete mode 100644 nisext/tests/__init__.py delete mode 100644 nisext/tests/test_sexts.py delete mode 100644 nisext/tests/test_testers.py diff --git a/.coveragerc b/.coveragerc index 57747ec0d8..bcf28e09c2 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,7 +1,7 @@ [run] branch = True -source = nibabel, nisext -include = */nibabel/*, */nisext/* +source = nibabel +include = */nibabel/* omit = */externals/* */benchmarks/* diff --git a/Makefile b/Makefile index 7d4c6666ae..689ad6a75f 100644 --- a/Makefile +++ b/Makefile @@ -233,25 +233,6 @@ bdist_rpm: bdist_mpkg: $(PYTHON) tools/mpkg_wrapper.py setup.py install -# Check for files not installed -check-files: - $(PYTHON) -c 'from nisext.testers import check_files; check_files("nibabel")' - -# Print out info for possible install methods -check-version-info: - $(PYTHON) -c 'from nisext.testers import info_from_here; info_from_here("nibabel")' - -# Run tests from installed code -installed-tests: - $(PYTHON) -c 'from nisext.testers import tests_installed; tests_installed("nibabel")' - -# Run tests from packaged distributions -sdist-tests: - $(PYTHON) -c 'from nisext.testers import sdist_tests; sdist_tests("nibabel", doctests=False)' - -bdist-egg-tests: - $(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nibabel", doctests=False, label="not script_test")' - sdist-venv: clean rm -rf dist venv unset PYTHONPATH && $(PYTHON) setup.py sdist --formats=zip @@ -260,7 +241,7 @@ sdist-venv: clean mkdir venv/tmp cd venv/tmp && unzip ../../dist/*.zip . venv/bin/activate && cd venv/tmp/nibabel* && python setup.py install - unset PYTHONPATH && . 
venv/bin/activate && cd venv && nosetests --with-doctest nibabel nisext + unset PYTHONPATH && . venv/bin/activate && cd venv && pytest --doctest-modules --doctest-plus --pyargs nibabel source-release: distclean $(PYTHON) -m compileall . diff --git a/nisext/__init__.py b/nisext/__init__.py deleted file mode 100644 index 6b19d7eb8e..0000000000 --- a/nisext/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# init for sext package -"""Setuptools extensions - -nibabel uses these routines, and houses them, and installs them. nipy-proper -and dipy use them. -""" - -import warnings - -warnings.warn( - """The nisext package is deprecated as of NiBabel 5.0 and will be fully -removed in NiBabel 6.0""" -) diff --git a/nisext/py3builder.py b/nisext/py3builder.py deleted file mode 100644 index 24bd298364..0000000000 --- a/nisext/py3builder.py +++ /dev/null @@ -1,38 +0,0 @@ -"""distutils utilities for porting to python 3 within 2-compatible tree""" - - -try: - from distutils.command.build_py import build_py_2to3 -except ImportError: - # 2.x - no parsing of code - from distutils.command.build_py import build_py -else: # Python 3 - # Command to also apply 2to3 to doctests - from distutils import log - - class build_py(build_py_2to3): - def run_2to3(self, files): - # Add doctest parsing; this stuff copied from distutils.utils in - # python 3.2 source - if not files: - return - fixer_names, options, explicit = (self.fixer_names, self.options, self.explicit) - # Make this class local, to delay import of 2to3 - from lib2to3.refactor import RefactoringTool, get_fixers_from_package - - class DistutilsRefactoringTool(RefactoringTool): - def log_error(self, msg, *args, **kw): - log.error(msg, *args) - - def log_message(self, msg, *args): - log.info(msg, *args) - - def log_debug(self, msg, *args): - log.debug(msg, *args) - - if fixer_names is None: - fixer_names = get_fixers_from_package('lib2to3.fixes') - r = DistutilsRefactoringTool(fixer_names, options=options) - r.refactor(files, write=True) - # Then doctests - r.refactor(files, write=True, doctests_only=True) diff --git a/nisext/sexts.py b/nisext/sexts.py deleted file mode 100644 index b206588dec..0000000000 --- a/nisext/sexts.py +++ /dev/null @@ -1,285 +0,0 @@ -"""Distutils / setuptools helpers""" - -import os -from configparser import ConfigParser -from distutils import log -from distutils.command.build_py import build_py -from distutils.command.install_scripts import install_scripts -from distutils.version import LooseVersion -from os.path import join as pjoin -from os.path import split as psplit -from os.path import splitext - - -def get_comrec_build(pkg_dir, build_cmd=build_py): - """Return extended build command class for recording commit - - The extended command tries to run git to find the current commit, getting - the empty string if it fails. It then writes the commit hash into a file - in the `pkg_dir` path, named ``COMMIT_INFO.txt``. - - In due course this information can be used by the package after it is - installed, to tell you what commit it was installed from if known. - - To make use of this system, you need a package with a COMMIT_INFO.txt file - - e.g. 
``myproject/COMMIT_INFO.txt`` - that might well look like this:: - - # This is an ini file that may contain information about the code state - [commit hash] - # The line below may contain a valid hash if it has been substituted during 'git archive' - archive_subst_hash=$Format:%h$ - # This line may be modified by the install process - install_hash= - - The COMMIT_INFO file above is also designed to be used with git substitution - - so you probably also want a ``.gitattributes`` file in the root directory - of your working tree that contains something like this:: - - myproject/COMMIT_INFO.txt export-subst - - That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git - archive`` - useful in case someone makes such an archive - for example with - via the github 'download source' button. - - Although all the above will work as is, you might consider having something - like a ``get_info()`` function in your package to display the commit - information at the terminal. See the ``pkg_info.py`` module in the nipy - package for an example. - """ - - class MyBuildPy(build_cmd): - """Subclass to write commit data into installation tree""" - - def run(self): - build_cmd.run(self) - import subprocess - - proc = subprocess.Popen( - 'git rev-parse --short HEAD', - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True, - ) - repo_commit, _ = proc.communicate() - # Fix for python 3 - repo_commit = str(repo_commit) - # We write the installation commit even if it's empty - cfg_parser = ConfigParser() - cfg_parser.read(pjoin(pkg_dir, 'COMMIT_INFO.txt')) - cfg_parser.set('commit hash', 'install_hash', repo_commit) - out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt') - cfg_parser.write(open(out_pth, 'wt')) - - return MyBuildPy - - -def _add_append_key(in_dict, key, value): - """Helper for appending dependencies to setuptools args""" - # If in_dict[key] does not exist, create it - # If in_dict[key] is a string, make it len 1 list of strings - # Append value to in_dict[key] list - if key not in in_dict: - in_dict[key] = [] - elif isinstance(in_dict[key], str): - in_dict[key] = [in_dict[key]] - in_dict[key].append(value) - - -# Dependency checks -def package_check( - pkg_name, - version=None, - optional=False, - checker=LooseVersion, - version_getter=None, - messages=None, - setuptools_args=None, -): - """Check if package `pkg_name` is present and has good enough version - - Has two modes of operation. If `setuptools_args` is None (the default), - raise an error for missing non-optional dependencies and log warnings for - missing optional dependencies. If `setuptools_args` is a dict, then fill - ``install_requires`` key value with any missing non-optional dependencies, - and the ``extras_requires`` key value with optional dependencies. - - This allows us to work with and without setuptools. It also means we can - check for packages that have not been installed with setuptools to avoid - installing them again. - - Parameters - ---------- - pkg_name : str - name of package as imported into python - version : {None, str}, optional - minimum version of the package that we require. If None, we don't - check the version. Default is None - optional : bool or str, optional - If ``bool(optional)`` is False, raise error for absent package or wrong - version; otherwise warn. If ``setuptools_args`` is not None, and - ``bool(optional)`` is not False, then `optional` should be a string - giving the feature name for the ``extras_require`` argument to setup. 
- checker : callable, optional - callable with which to return comparable thing from version - string. Default is ``distutils.version.LooseVersion`` - version_getter : {None, callable}: - Callable that takes `pkg_name` as argument, and returns the - package version string - as in:: - - ``version = version_getter(pkg_name)`` - - If None, equivalent to:: - - mod = __import__(pkg_name); version = mod.__version__`` - messages : None or dict, optional - dictionary giving output messages - setuptools_args : None or dict - If None, raise errors / warnings for missing non-optional / optional - dependencies. If dict fill key values ``install_requires`` and - ``extras_require`` for non-optional and optional dependencies. - """ - setuptools_mode = not setuptools_args is None - optional_tf = bool(optional) - if version_getter is None: - - def version_getter(pkg_name): - mod = __import__(pkg_name) - return mod.__version__ - - if messages is None: - messages = {} - msgs = { - 'missing': 'Cannot import package "%s" - is it installed?', - 'missing opt': 'Missing optional package "%s"', - 'opt suffix': '; you may get run-time errors', - 'version too old': 'You have version %s of package "%s" but we need version >= %s', - } - msgs.update(messages) - status, have_version = _package_status(pkg_name, version, version_getter, checker) - if status == 'satisfied': - return - if not setuptools_mode: - if status == 'missing': - if not optional_tf: - raise RuntimeError(msgs['missing'] % pkg_name) - log.warn(msgs['missing opt'] % pkg_name + msgs['opt suffix']) - return - elif status == 'no-version': - raise RuntimeError(f'Cannot find version for {pkg_name}') - assert status == 'low-version' - if not optional_tf: - raise RuntimeError(msgs['version too old'] % (have_version, pkg_name, version)) - log.warn(msgs['version too old'] % (have_version, pkg_name, version) + msgs['opt suffix']) - return - # setuptools mode - if optional_tf and not isinstance(optional, str): - raise RuntimeError('Not-False optional arg should be string') - dependency = pkg_name - if version: - dependency += '>=' + version - if optional_tf: - if not 'extras_require' in setuptools_args: - setuptools_args['extras_require'] = {} - _add_append_key(setuptools_args['extras_require'], optional, dependency) - else: - _add_append_key(setuptools_args, 'install_requires', dependency) - - -def _package_status(pkg_name, version, version_getter, checker): - try: - __import__(pkg_name) - except ImportError: - return 'missing', None - if not version: - return 'satisfied', None - try: - have_version = version_getter(pkg_name) - except AttributeError: - return 'no-version', None - if checker(have_version) < checker(version): - return 'low-version', have_version - return 'satisfied', have_version - - -BAT_TEMPLATE = r"""@echo off -REM wrapper to use shebang first line of {FNAME} -set mypath=%~dp0 -set pyscript="%mypath%{FNAME}" -set /p line1=<%pyscript% -if "%line1:~0,2%" == "#!" (goto :goodstart) -echo First line of %pyscript% does not start with "#!" -exit /b 1 -:goodstart -set py_exe=%line1:~2% -call "%py_exe%" %pyscript% %* -""" - - -class install_scripts_bat(install_scripts): - """Make scripts executable on Windows - - Scripts are bare file names without extension on Unix, fitting (for example) - Debian rules. They identify as python scripts with the usual ``#!`` first - line. Unix recognizes and uses this first "shebang" line, but Windows does - not. 
So, on Windows only we add a ``.bat`` wrapper of name - ``bare_script_name.bat`` to call ``bare_script_name`` using the python - interpreter from the #! first line of the script. - - Notes - ----- - See discussion at - https://matthew-brett.github.io/pydagogue/installing_scripts.html and - example at git://github.com/matthew-brett/myscripter.git for more - background. - """ - - def run(self): - install_scripts.run(self) - if not os.name == 'nt': - return - for filepath in self.get_outputs(): - # If we can find an executable name in the #! top line of the script - # file, make .bat wrapper for script. - with open(filepath, 'rt') as fobj: - first_line = fobj.readline() - if not (first_line.startswith('#!') and 'python' in first_line.lower()): - log.info('No #!python executable found, skipping .bat wrapper') - continue - pth, fname = psplit(filepath) - froot, ext = splitext(fname) - bat_file = pjoin(pth, froot + '.bat') - bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname) - log.info(f'Making {bat_file} wrapper for {filepath}') - if self.dry_run: - continue - with open(bat_file, 'wt') as fobj: - fobj.write(bat_contents) - - -class Bunch: - def __init__(self, vars): - for key, name in vars.items(): - if key.startswith('__'): - continue - self.__dict__[key] = name - - -def read_vars_from(ver_file): - """Read variables from Python text file - - Parameters - ---------- - ver_file : str - Filename of file to read - - Returns - ------- - info_vars : Bunch instance - Bunch object where variables read from `ver_file` appear as - attributes - """ - # Use exec for compabibility with Python 3 - ns = {} - with open(ver_file, 'rt') as fobj: - exec(fobj.read(), ns) - return Bunch(ns) diff --git a/nisext/testers.py b/nisext/testers.py deleted file mode 100644 index 07f71af696..0000000000 --- a/nisext/testers.py +++ /dev/null @@ -1,523 +0,0 @@ -"""Test package information in various install settings - -The routines here install the package from source directories, zips or eggs, and -check these installations by running tests, checking version information, -looking for files that were not copied over. - -The typical use for this module is as a Makefile target. 
For example, here are -the Makefile targets from nibabel:: - - # Check for files not installed - check-files: - $(PYTHON) -c 'from nisext.testers import check_files; check_files("nibabel")' - - # Print out info for possible install methods - check-version-info: - $(PYTHON) -c 'from nisext.testers import info_from_here; info_from_here("nibabel")' - - # Run tests from installed code - installed-tests: - $(PYTHON) -c 'from nisext.testers import tests_installed; tests_installed("nibabel")' - - # Run tests from installed code - sdist-tests: - $(PYTHON) -c 'from nisext.testers import sdist_tests; sdist_tests("nibabel")' - - # Run tests from binary egg - bdist-egg-tests: - $(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nibabel")' -""" - - -import os -import re -import shutil -import sys -import tempfile -import zipfile -from glob import glob -from os.path import abspath -from os.path import join as pjoin -from subprocess import PIPE, Popen - -NEEDS_SHELL = os.name != 'nt' -PYTHON = sys.executable -HAVE_PUTENV = hasattr(os, 'putenv') - -PY_LIB_SDIR = 'pylib' - - -def back_tick(cmd, ret_err=False, as_str=True): - """Run command `cmd`, return stdout, or stdout, stderr if `ret_err` - - Roughly equivalent to ``check_output`` in Python 2.7 - - Parameters - ---------- - cmd : str - command to execute - ret_err : bool, optional - If True, return stderr in addition to stdout. If False, just return - stdout - as_str : bool, optional - Whether to decode outputs to unicode string on exit. - - Returns - ------- - out : str or tuple - If `ret_err` is False, return stripped string containing stdout from - `cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where - ``stdout`` is the stripped stdout, and ``stderr`` is the stripped - stderr. - - Raises - ------ - RuntimeError - if command returns non-zero exit code. - """ - proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=NEEDS_SHELL) - out, err = proc.communicate() - retcode = proc.returncode - if retcode is None: - proc.terminate() - raise RuntimeError(cmd + ' process did not terminate') - if retcode != 0: - raise RuntimeError(cmd + ' process returned code %d' % retcode) - out = out.strip() - if as_str: - out = out.decode('latin-1') - if not ret_err: - return out - err = err.strip() - if as_str: - err = err.decode('latin-1') - return out, err - - -def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): - """Run command in own process in anonymous path - - Parameters - ---------- - mod_name : str - Name of module to import - e.g. 'nibabel' - pkg_path : str - directory containing `mod_name` package. Typically that will be the - directory containing the e.g. 'nibabel' directory. - cmd : str - Python command to execute - script_dir : None or str, optional - script directory to prepend to PATH - print_location : bool, optional - Whether to print the location of the imported `mod_name` - - Returns - ------- - stdout : str - stdout as str - stderr : str - stderr as str - """ - if script_dir is None: - paths_add = '' - else: - if not HAVE_PUTENV: - raise RuntimeError('We cannot set environment variables') - # Need to add the python path for the scripts to pick up our package in - # their environment, because the scripts will get called via the shell - # (via `cmd`). Consider that PYTHONPATH may not be set. Because the - # command might run scripts via the shell, prepend script_dir to the - # system path also. 
- paths_add = r""" -os.environ['PATH'] = r'"{script_dir}"' + os.path.pathsep + os.environ['PATH'] -PYTHONPATH = os.environ.get('PYTHONPATH') -if PYTHONPATH is None: - os.environ['PYTHONPATH'] = r'"{pkg_path}"' -else: - os.environ['PYTHONPATH'] = r'"{pkg_path}"' + os.path.pathsep + PYTHONPATH -""".format( - **locals() - ) - if print_location: - p_loc = f'print({mod_name}.__file__);' - else: - p_loc = '' - cwd = os.getcwd() - tmpdir = tempfile.mkdtemp() - try: - os.chdir(tmpdir) - with open('script.py', 'wt') as fobj: - fobj.write( - r""" -import os -import sys -sys.path.insert(0, r"{pkg_path}") -{paths_add} -import {mod_name} -{p_loc} -{cmd}""".format( - **locals() - ) - ) - res = back_tick(f'{PYTHON} script.py', ret_err=True) - finally: - os.chdir(cwd) - shutil.rmtree(tmpdir) - return res - - -def zip_extract_all(fname, path=None): - """Extract all members from zipfile - - Deals with situation where the directory is stored in the zipfile as a name, - as well as files that have to go into this directory. - """ - zf = zipfile.ZipFile(fname) - members = zf.namelist() - # Remove members that are just bare directories - members = [m for m in members if not m.endswith('/')] - for zipinfo in members: - zf.extract(zipinfo, path, None) - - -def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'): - """Install package in `from_dir` to standard location in `to_dir` - - Parameters - ---------- - from_dir : str - path containing files to install with ``python setup.py ...`` - to_dir : str - prefix path to which files will be installed, as in ``python setup.py - install --prefix=to_dir`` - py_lib_sdir : str, optional - subdirectory within `to_dir` to which library code will be installed - bin_sdir : str, optional - subdirectory within `to_dir` to which scripts will be installed - """ - site_pkgs_path = os.path.join(to_dir, py_lib_sdir) - py_lib_locs = f' --install-purelib={site_pkgs_path} ' f'--install-platlib={site_pkgs_path}' - pwd = os.path.abspath(os.getcwd()) - cmd = f'{PYTHON} setup.py --quiet install --prefix={to_dir} {py_lib_locs}' - try: - os.chdir(from_dir) - back_tick(cmd) - finally: - os.chdir(pwd) - - -def install_from_zip( - zip_fname, install_path, pkg_finder=None, py_lib_sdir=PY_LIB_SDIR, script_sdir='bin' -): - """Install package from zip file `zip_fname` - - Parameters - ---------- - zip_fname : str - filename of zip file containing package code - install_path : str - output prefix at which to install package - pkg_finder : None or callable, optional - If None, assume zip contains ``setup.py`` at the top level. Otherwise, - find directory containing ``setup.py`` with ``pth = - pkg_finder(unzip_path)`` where ``unzip_path`` is the path to which we - have unzipped the zip file contents. - py_lib_sdir : str, optional - subdirectory to which to write the library code from the package. Thus - if package called ``nibabel``, the written code will be in - ``//nibabel - script_sdir : str, optional - subdirectory to which we write the installed scripts. 
Thus scripts will - be written to ``/ - """ - unzip_path = tempfile.mkdtemp() - try: - # Zip may unpack module into current directory - zip_extract_all(zip_fname, unzip_path) - if pkg_finder is None: - from_path = unzip_path - else: - from_path = pkg_finder(unzip_path) - install_from_to(from_path, install_path, py_lib_sdir, script_sdir) - finally: - shutil.rmtree(unzip_path) - - -def contexts_print_info(mod_name, repo_path, install_path): - """Print result of get_info from different installation routes - - Runs installation from: - - * git archive zip file - * with setup.py install from repository directory - * just running code from repository directory - - and prints out result of get_info in each case. There will be many files - written into `install_path` that you may want to clean up somehow. - - Parameters - ---------- - mod_name : str - package name that will be installed, and tested - repo_path : str - path to location of git repository - install_path : str - path into which to install temporary installations - """ - site_pkgs_path = os.path.join(install_path, PY_LIB_SDIR) - # first test archive - pwd = os.path.abspath(os.getcwd()) - out_fname = pjoin(install_path, 'test.zip') - try: - os.chdir(repo_path) - back_tick(f'git archive --format zip -o {out_fname} HEAD') - finally: - os.chdir(pwd) - install_from_zip(out_fname, install_path, None) - cmd_str = f'print({mod_name}.get_info())' - print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0]) - # now test install into a directory from the repository - install_from_to(repo_path, install_path, PY_LIB_SDIR) - print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0]) - # test from development tree - print(run_mod_cmd(mod_name, repo_path, cmd_str)[0]) - - -def info_from_here(mod_name): - """Run info context checks starting in working directory - - Runs checks from current working directory, installing temporary - installations into a new temporary directory - - Parameters - ---------- - mod_name : str - package name that will be installed, and tested - """ - repo_path = os.path.abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - try: - contexts_print_info(mod_name, repo_path, install_path) - finally: - shutil.rmtree(install_path) - - -def tests_installed(mod_name, source_path=None): - """Install from `source_path` into temporary directory; run tests - - Parameters - ---------- - mod_name : str - name of module - e.g. 'nibabel' - source_path : None or str - Path from which to install. If None, defaults to working directory - """ - if source_path is None: - source_path = os.path.abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - site_pkgs_path = pjoin(install_path, PY_LIB_SDIR) - scripts_path = pjoin(install_path, 'bin') - try: - install_from_to(source_path, install_path, PY_LIB_SDIR, 'bin') - stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, mod_name + '.test()', scripts_path) - finally: - shutil.rmtree(install_path) - print(stdout) - print(stderr) - - -# Tell nose this is not a test -tests_installed.__test__ = False - - -def check_installed_files(repo_mod_path, install_mod_path): - """Check files in `repo_mod_path` are installed at `install_mod_path` - - At the moment, all this does is check that all the ``*.py`` files in - `repo_mod_path` are installed at `install_mod_path`. - - Parameters - ---------- - repo_mod_path : str - repository path containing package files, e.g. /nibabel> - install_mod_path : str - path at which package has been installed. This is the path where the - root package ``__init__.py`` lives. 
- - Return - ------ - uninstalled : list - list of files that should have been installed, but have not been - installed - """ - return missing_from(repo_mod_path, install_mod_path, filter=r'\.py$') - - -def missing_from(path0, path1, filter=None): - """Return filenames present in `path0` but not in `path1` - - Parameters - ---------- - path0 : str - path which contains all files of interest - path1 : str - path which should contain all files of interest - filter : None or str or regexp, optional - A successful result from ``filter.search(fname)`` means the file is of - interest. None means all files are of interest - - Returns - ------- - path1_missing : list - list of all files missing from `path1` that are in `path0` at the same - relative path. - """ - if not filter is None: - filter = re.compile(filter) - uninstalled = [] - # Walk directory tree to get py files - for dirpath, dirnames, filenames in os.walk(path0): - out_dirpath = dirpath.replace(path0, path1) - for fname in filenames: - if not filter is None and filter.search(fname) is None: - continue - equiv_fname = os.path.join(out_dirpath, fname) - if not os.path.isfile(equiv_fname): - uninstalled.append(pjoin(dirpath, fname)) - return uninstalled - - -def check_files(mod_name, repo_path=None, scripts_sdir='bin'): - """Print library and script files not picked up during install""" - if repo_path is None: - repo_path = abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - repo_mod_path = pjoin(repo_path, mod_name) - installed_mod_path = pjoin(install_path, PY_LIB_SDIR, mod_name) - repo_bin = pjoin(repo_path, 'bin') - installed_bin = pjoin(install_path, 'bin') - try: - zip_fname = make_dist(repo_path, install_path, 'sdist --formats=zip', '*.zip') - pf = get_sdist_finder(mod_name) - install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, scripts_sdir) - lib_misses = missing_from(repo_mod_path, installed_mod_path, r'\.py$') - script_misses = missing_from(repo_bin, installed_bin) - finally: - shutil.rmtree(install_path) - if lib_misses: - print('Missed library files: ', ', '.join(lib_misses)) - else: - print('You got all the library files') - if script_misses: - print('Missed script files: ', ', '.join(script_misses)) - else: - print('You got all the script files') - return len(lib_misses) > 0 or len(script_misses) > 0 - - -def get_sdist_finder(mod_name): - """Return function finding sdist source directory for `mod_name`""" - - def pf(pth): - pkg_dirs = glob(pjoin(pth, mod_name + '-*')) - if len(pkg_dirs) != 1: - raise OSError('There must be one and only one package dir') - return pkg_dirs[0] - - return pf - - -def sdist_tests(mod_name, repo_path=None, label='fast', doctests=True): - """Make sdist zip, install from it, and run tests""" - if repo_path is None: - repo_path = abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - try: - zip_fname = make_dist(repo_path, install_path, 'sdist --formats=zip', '*.zip') - pf = get_sdist_finder(mod_name) - install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, 'bin') - site_pkgs_path = pjoin(install_path, PY_LIB_SDIR) - script_path = pjoin(install_path, 'bin') - cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" - stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, cmd, script_path) - finally: - shutil.rmtree(install_path) - print(stdout) - print(stderr) - - -sdist_tests.__test__ = False - - -def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True): - """Make bdist_egg, unzip it, and run tests from result - - We've got a problem here, because the 
egg does not contain the scripts, and - so, if we are testing the scripts with ``mod.test()``, we won't pick up the - scripts from the repository we are testing. - - So, you might need to add a label to the script tests, and use the `label` - parameter to indicate these should be skipped. As in: - - bdist_egg_tests('nibabel', None, label='not script_test') - """ - if repo_path is None: - repo_path = abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - scripts_path = pjoin(install_path, 'bin') - try: - zip_fname = make_dist(repo_path, install_path, 'bdist_egg', '*.egg') - zip_extract_all(zip_fname, install_path) - cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" - stdout, stderr = run_mod_cmd(mod_name, install_path, cmd, scripts_path) - finally: - shutil.rmtree(install_path) - print(stdout) - print(stderr) - - -bdist_egg_tests.__test__ = False - - -def make_dist(repo_path, out_dir, setup_params, zipglob): - """Create distutils distribution file - - Parameters - ---------- - repo_path : str - path to repository containing code and ``setup.py`` - out_dir : str - path to which to write new distribution file - setup_params: str - parameters to pass to ``setup.py`` to create distribution. - zipglob : str - glob identifying expected output file. - - Returns - ------- - out_fname : str - filename of generated distribution file - - Examples - -------- - Make, return a zipped sdist:: - - make_dist('/path/to/repo', '/tmp/path', 'sdist --formats=zip', '*.zip') - - Make, return a binary egg:: - - make_dist('/path/to/repo', '/tmp/path', 'bdist_egg', '*.egg') - """ - pwd = os.path.abspath(os.getcwd()) - try: - os.chdir(repo_path) - back_tick(f'{PYTHON} setup.py {setup_params} --dist-dir={out_dir}') - zips = glob(pjoin(out_dir, zipglob)) - if len(zips) != 1: - raise OSError( - f'There must be one and only one {zipglob} ' - f"file, but I found \"{': '.join(zips)}\"" - ) - finally: - os.chdir(pwd) - return zips[0] diff --git a/nisext/tests/__init__.py b/nisext/tests/__init__.py deleted file mode 100644 index af7d1d1dd2..0000000000 --- a/nisext/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Tests for nisext package diff --git a/nisext/tests/test_sexts.py b/nisext/tests/test_sexts.py deleted file mode 100644 index f262ec5685..0000000000 --- a/nisext/tests/test_sexts.py +++ /dev/null @@ -1,106 +0,0 @@ -"""Tests for nisexts.sexts module -""" - -import sys -import types - -import pytest - -from ..sexts import package_check - -FAKE_NAME = 'nisext_improbable' -assert FAKE_NAME not in sys.modules -FAKE_MODULE = types.ModuleType('nisext_fake') - - -def test_package_check(): - # Try to use a required package - raise error - with pytest.raises(RuntimeError): - package_check(FAKE_NAME) - # Optional, log.warn - package_check(FAKE_NAME, optional=True) - # Can also pass a string - package_check(FAKE_NAME, optional='some-package') - try: - # Make a package - sys.modules[FAKE_NAME] = FAKE_MODULE - # Now it passes if we don't check the version - package_check(FAKE_NAME) - # A fake version - FAKE_MODULE.__version__ = '0.2' - package_check(FAKE_NAME, version='0.2') - # fails when version not good enough - with pytest.raises(RuntimeError): - package_check(FAKE_NAME, '0.3') - # Unless optional in which case log.warns - package_check(FAKE_NAME, version='0.3', optional=True) - # Might do custom version check - package_check(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') - finally: - del sys.modules[FAKE_NAME] - - -def test_package_check_setuptools(): - # If setuptools arg not None, missing package 
just adds it to arg - with pytest.raises(RuntimeError): - package_check(FAKE_NAME, setuptools_args=None) - - def pkg_chk_sta(*args, **kwargs): - st_args = {} - package_check(*args, setuptools_args=st_args, **kwargs) - return st_args - - assert pkg_chk_sta(FAKE_NAME) == {'install_requires': ['nisext_improbable']} - # Check that this gets appended to existing value - old_sta = {'install_requires': ['something']} - package_check(FAKE_NAME, setuptools_args=old_sta) - assert old_sta == {'install_requires': ['something', 'nisext_improbable']} - # That existing value as string gets converted to a list - old_sta = {'install_requires': 'something'} - package_check(FAKE_NAME, setuptools_args=old_sta) - assert old_sta == {'install_requires': ['something', 'nisext_improbable']} - # Optional, add to extras_require - assert pkg_chk_sta(FAKE_NAME, optional='something') == { - 'extras_require': {'something': ['nisext_improbable']} - } - # Check that this gets appended to existing value - old_sta = {'extras_require': {'something': ['amodule']}} - package_check(FAKE_NAME, optional='something', setuptools_args=old_sta) - assert old_sta == {'extras_require': {'something': ['amodule', 'nisext_improbable']}} - # That string gets converted to a list here too - old_sta = {'extras_require': {'something': 'amodule'}} - package_check(FAKE_NAME, optional='something', setuptools_args=old_sta) - assert old_sta == {'extras_require': {'something': ['amodule', 'nisext_improbable']}} - # But optional has to be a string if not empty and setuptools_args defined - with pytest.raises(RuntimeError): - package_check(FAKE_NAME, optional=True, setuptools_args={}) - try: - # Make a package - sys.modules[FAKE_NAME] = FAKE_MODULE - # No install_requires because we already have it - assert pkg_chk_sta(FAKE_NAME) == {} - # A fake version still works - FAKE_MODULE.__version__ = '0.2' - assert pkg_chk_sta(FAKE_NAME, version='0.2') == {} - # goes into install requires when version not good enough - exp_spec = [FAKE_NAME + '>=0.3'] - assert pkg_chk_sta(FAKE_NAME, version='0.3') == {'install_requires': exp_spec} - # Unless optional in which case goes into extras_require - package_check(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') - assert pkg_chk_sta(FAKE_NAME, version='0.3', optional='afeature') == { - 'extras_require': {'afeature': exp_spec} - } - # Might do custom version check - assert pkg_chk_sta(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') == {} - # If the version check fails, put into requires - bad_getter = lambda x: x.not_an_attribute - exp_spec = [FAKE_NAME + '>=0.2'] - assert pkg_chk_sta(FAKE_NAME, version='0.2', version_getter=bad_getter) == { - 'install_requires': exp_spec - } - # Likewise for optional dependency - assert pkg_chk_sta( - FAKE_NAME, version='0.2', optional='afeature', version_getter=bad_getter - ) == {'extras_require': {'afeature': [FAKE_NAME + '>=0.2']}} - finally: - del sys.modules[FAKE_NAME] diff --git a/nisext/tests/test_testers.py b/nisext/tests/test_testers.py deleted file mode 100644 index f81a40f1df..0000000000 --- a/nisext/tests/test_testers.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Tests for testers -""" - -import os -from os.path import dirname, pathsep - -import pytest - -from ..testers import PYTHON, back_tick, run_mod_cmd - - -def test_back_tick(): - cmd = f'{PYTHON} -c "print(\'Hello\')"' - assert back_tick(cmd) == 'Hello' - assert back_tick(cmd, ret_err=True) == ('Hello', '') - assert back_tick(cmd, True, False) == (b'Hello', b'') - cmd = f'{PYTHON} -c "raise ValueError()"' - 
with pytest.raises(RuntimeError): - back_tick(cmd) - - -def test_run_mod_cmd(): - mod = 'os' - mod_dir = dirname(os.__file__) - assert run_mod_cmd(mod, mod_dir, "print('Hello')", None, False) == ('Hello', '') - sout, serr = run_mod_cmd(mod, mod_dir, "print('Hello again')") - assert serr == '' - mod_file, out_str = [s.strip() for s in sout.split('\n')] - assert mod_file.startswith(mod_dir) - assert out_str == 'Hello again' - sout, serr = run_mod_cmd(mod, mod_dir, "print(os.environ['PATH'])", None, False) - assert serr == '' - sout2, serr = run_mod_cmd(mod, mod_dir, "print(os.environ['PATH'])", 'pth2', False) - assert serr == '' - assert sout2 == '"pth2"' + pathsep + sout diff --git a/pyproject.toml b/pyproject.toml index 14095b8f22..e92c465e0d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -87,7 +87,7 @@ exclude = [ ] [tool.hatch.build.targets.wheel] -packages = ["nibabel", "nisext"] +packages = ["nibabel"] exclude = [ # 56MB test file does not need to be installed everywhere "nibabel/nicom/tests/data/4d_multiframe_test.dcm", From e3ffb71891c616deebfe28bfe1f45dc67bb361ce Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Wed, 17 Jan 2024 14:39:37 -0500 Subject: [PATCH 500/702] allow inhomogeneous array --- nibabel/streamlines/tests/test_tractogram.py | 44 ++++++++++++++------ nibabel/streamlines/tractogram.py | 20 +++++++-- 2 files changed, 48 insertions(+), 16 deletions(-) diff --git a/nibabel/streamlines/tests/test_tractogram.py b/nibabel/streamlines/tests/test_tractogram.py index 30294be438..09e3b910be 100644 --- a/nibabel/streamlines/tests/test_tractogram.py +++ b/nibabel/streamlines/tests/test_tractogram.py @@ -80,6 +80,7 @@ def make_dummy_streamline(nb_points): 'mean_curvature': np.array([1.11], dtype='f4'), 'mean_torsion': np.array([1.22], dtype='f4'), 'mean_colors': np.array([1, 0, 0], dtype='f4'), + 'clusters_labels': np.array([0, 1], dtype='i4'), } elif nb_points == 2: @@ -92,6 +93,7 @@ def make_dummy_streamline(nb_points): 'mean_curvature': np.array([2.11], dtype='f4'), 'mean_torsion': np.array([2.22], dtype='f4'), 'mean_colors': np.array([0, 1, 0], dtype='f4'), + 'clusters_labels': np.array([2, 3, 4], dtype='i4'), } elif nb_points == 5: @@ -104,6 +106,7 @@ def make_dummy_streamline(nb_points): 'mean_curvature': np.array([3.11], dtype='f4'), 'mean_torsion': np.array([3.22], dtype='f4'), 'mean_colors': np.array([0, 0, 1], dtype='f4'), + 'clusters_labels': np.array([5, 6, 7, 8], dtype='i4'), } return streamline, data_per_point, data_for_streamline @@ -119,6 +122,7 @@ def setup_module(): DATA['mean_curvature'] = [] DATA['mean_torsion'] = [] DATA['mean_colors'] = [] + DATA['clusters_labels'] = [] for nb_points in [1, 2, 5]: data = make_dummy_streamline(nb_points) streamline, data_per_point, data_for_streamline = data @@ -128,12 +132,14 @@ def setup_module(): DATA['mean_curvature'].append(data_for_streamline['mean_curvature']) DATA['mean_torsion'].append(data_for_streamline['mean_torsion']) DATA['mean_colors'].append(data_for_streamline['mean_colors']) + DATA['clusters_labels'].append(data_for_streamline['clusters_labels']) DATA['data_per_point'] = {'colors': DATA['colors'], 'fa': DATA['fa']} DATA['data_per_streamline'] = { 'mean_curvature': DATA['mean_curvature'], 'mean_torsion': DATA['mean_torsion'], 'mean_colors': DATA['mean_colors'], + 'clusters_labels': DATA['clusters_labels'], } DATA['empty_tractogram'] = Tractogram(affine_to_rasmm=np.eye(4)) @@ -154,6 +160,7 @@ def setup_module(): 'mean_curvature': lambda: (e for e in DATA['mean_curvature']), 'mean_torsion': lambda: (e for 
e in DATA['mean_torsion']), 'mean_colors': lambda: (e for e in DATA['mean_colors']), + 'clusters_labels': lambda: (e for e in DATA['clusters_labels']), } DATA['lazy_tractogram'] = LazyTractogram( @@ -214,7 +221,10 @@ def test_per_array_dict_creation(self): data_dict = PerArrayDict(nb_streamlines, data_per_streamline) assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): - assert_array_equal(data_dict[k], data_per_streamline[k]) + if isinstance(data_dict[k], np.ndarray) and np.all( + data_dict[k].shape[0] == data_dict[k].shape + ): + assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] assert len(data_dict) == len(data_per_streamline) - 1 @@ -224,7 +234,10 @@ def test_per_array_dict_creation(self): data_dict = PerArrayDict(nb_streamlines, data_per_streamline) assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): - assert_array_equal(data_dict[k], data_per_streamline[k]) + if isinstance(data_dict[k], np.ndarray) and np.all( + data_dict[k].shape[0] == data_dict[k].shape + ): + assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] assert len(data_dict) == len(data_per_streamline) - 1 @@ -234,7 +247,10 @@ def test_per_array_dict_creation(self): data_dict = PerArrayDict(nb_streamlines, **data_per_streamline) assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): - assert_array_equal(data_dict[k], data_per_streamline[k]) + if isinstance(data_dict[k], np.ndarray) and np.all( + data_dict[k].shape[0] == data_dict[k].shape + ): + assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] assert len(data_dict) == len(data_per_streamline) - 1 @@ -261,6 +277,7 @@ def test_extend(self): 'mean_curvature': 2 * np.array(DATA['mean_curvature']), 'mean_torsion': 3 * np.array(DATA['mean_torsion']), 'mean_colors': 4 * np.array(DATA['mean_colors']), + 'clusters_labels': 5 * np.array(DATA['clusters_labels'], dtype=object), } sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) @@ -284,7 +301,8 @@ def test_extend(self): 'mean_curvature': 2 * np.array(DATA['mean_curvature']), 'mean_torsion': 3 * np.array(DATA['mean_torsion']), 'mean_colors': 4 * np.array(DATA['mean_colors']), - 'other': 5 * np.array(DATA['mean_colors']), + 'clusters_labels': 5 * np.array(DATA['clusters_labels'], dtype=object), + 'other': 6 * np.array(DATA['mean_colors']), } sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) @@ -305,6 +323,7 @@ def test_extend(self): 'mean_curvature': 2 * np.array(DATA['mean_curvature']), 'mean_torsion': 3 * np.array(DATA['mean_torsion']), 'mean_colors': 4 * np.array(DATA['mean_torsion']), + 'clusters_labels': 5 * np.array(DATA['clusters_labels'], dtype=object), } sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) with pytest.raises(ValueError): @@ -441,7 +460,10 @@ def test_lazydict_creation(self): assert is_lazy_dict(data_dict) assert data_dict.keys() == expected_keys for k in data_dict.keys(): - assert_array_equal(list(data_dict[k]), list(DATA['data_per_streamline'][k])) + if isinstance(data_dict[k], np.ndarray) and np.all( + data_dict[k].shape[0] == data_dict[k].shape + ): + assert_array_equal(list(data_dict[k]), list(DATA['data_per_streamline'][k])) assert len(data_dict) == len(DATA['data_per_streamline_func']) @@ -578,6 +600,7 @@ def test_tractogram_add_new_data(self): t.data_per_streamline['mean_curvature'] = DATA['mean_curvature'] t.data_per_streamline['mean_torsion'] = DATA['mean_torsion'] 
t.data_per_streamline['mean_colors'] = DATA['mean_colors'] + t.data_per_streamline['clusters_labels'] = DATA['clusters_labels'] assert_tractogram_equal(t, DATA['tractogram']) # Retrieve tractogram by their index. @@ -598,6 +621,7 @@ def test_tractogram_add_new_data(self): t.data_per_streamline['mean_curvature'] = DATA['mean_curvature'] t.data_per_streamline['mean_torsion'] = DATA['mean_torsion'] t.data_per_streamline['mean_colors'] = DATA['mean_colors'] + t.data_per_streamline['clusters_labels'] = DATA['clusters_labels'] assert_tractogram_equal(t, DATA['tractogram']) def test_tractogram_copy(self): @@ -647,14 +671,6 @@ def test_creating_invalid_tractogram(self): with pytest.raises(ValueError): Tractogram(streamlines=DATA['streamlines'], data_per_point={'scalars': scalars}) - # Inconsistent dimension for a data_per_streamline. - properties = [[1.11, 1.22], [2.11], [3.11, 3.22]] - - with pytest.raises(ValueError): - Tractogram( - streamlines=DATA['streamlines'], data_per_streamline={'properties': properties} - ) - # Too many dimension for a data_per_streamline. properties = [ np.array([[1.11], [1.22]], dtype='f4'), @@ -870,6 +886,7 @@ def test_lazy_tractogram_from_data_func(self): DATA['mean_curvature'], DATA['mean_torsion'], DATA['mean_colors'], + DATA['clusters_labels'], ] def _data_gen(): @@ -879,6 +896,7 @@ def _data_gen(): 'mean_curvature': d[3], 'mean_torsion': d[4], 'mean_colors': d[5], + 'clusters_labels': d[6], } yield TractogramItem(d[0], data_for_streamline, data_for_points) diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index 9e7c0f9af2..5a39b415a6 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -1,6 +1,7 @@ import copy import numbers -from collections.abc import MutableMapping +import types +from collections.abc import Iterable, MutableMapping from warnings import warn import numpy as np @@ -101,15 +102,28 @@ def __init__(self, n_rows=0, *args, **kwargs): super().__init__(*args, **kwargs) def __setitem__(self, key, value): - value = np.asarray(list(value)) + dtype = np.float64 + + if isinstance(value, types.GeneratorType): + value = list(value) + + if isinstance(value, np.ndarray): + dtype = value.dtype + elif not all(len(v) == len(value[0]) for v in value[1:]): + dtype = object + + value = np.asarray(value, dtype=dtype) if value.ndim == 1 and value.dtype != object: # Reshape without copy value.shape = (len(value), 1) - if value.ndim != 2: + if value.ndim != 2 and value.dtype != object: raise ValueError('data_per_streamline must be a 2D array.') + if value.dtype == object and not all(isinstance(v, Iterable) for v in value): + raise ValueError('data_per_streamline must be a 2D array') + # We make sure there is the right amount of values if 0 < self.n_rows != len(value): msg = f'The number of values ({len(value)}) should match n_elements ({self.n_rows}).' 
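The ``PerArrayDict.__setitem__`` change above relaxes the previous requirement
that every per-streamline value have the same length: homogeneous input is
still coerced to a 2D array, while ragged input now falls back to a 1D array
of ``dtype=object`` instead of raising ``ValueError``. A minimal sketch of the
resulting behaviour (illustrative values only; assumes a nibabel build that
includes this patch)::

    import numpy as np

    from nibabel.streamlines.tractogram import PerArrayDict

    sdict = PerArrayDict(n_rows=3)

    # Homogeneous values become a 2D float array, one row per streamline
    sdict['mean_curvature'] = [np.array([1.11]), np.array([2.11]), np.array([3.11])]
    assert sdict['mean_curvature'].shape == (3, 1)

    # Ragged values, e.g. a different number of cluster labels per
    # streamline, previously raised ValueError; they are now kept as a
    # 1D array with dtype=object
    sdict['clusters_labels'] = [
        np.array([0, 1]),
        np.array([2, 3, 4]),
        np.array([5, 6, 7, 8]),
    ]
    assert sdict['clusters_labels'].dtype == object

The object-array fallback avoids padding ragged values to a common width,
while homogeneous data keeps its fixed 2D layout.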
From 6919b539401541fc5935e83610a0579e690ba79f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 22 Feb 2024 21:35:21 -0500 Subject: [PATCH 501/702] TEST: Accommodate pytest 8 changes --- nibabel/testing/__init__.py | 12 ++++++ nibabel/tests/test_image_api.py | 56 +++++++++------------------ nibabel/tests/test_image_load_save.py | 4 +- nibabel/tests/test_loadsave.py | 26 ++++++------- nibabel/tests/test_onetime.py | 4 +- nibabel/tests/test_orientations.py | 4 +- nibabel/tests/test_spatialimages.py | 12 +++--- 7 files changed, 56 insertions(+), 62 deletions(-) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 5baa5e2b86..21ecadf841 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -233,3 +233,15 @@ def expires(version): return lambda x: x return pytest.mark.xfail(raises=ExpiredDeprecationError) + + +def deprecated_to(version): + """Context manager to expect DeprecationWarnings until a given version""" + from packaging.version import Version + + from nibabel import __version__ as nbver + + if Version(nbver) < Version(version): + return pytest.deprecated_call() + + return nullcontext() diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index f1fc720716..86c04985f8 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -48,6 +48,7 @@ bytesio_filemap, bytesio_round_trip, clear_and_catch_warnings, + deprecated_to, expires, nullcontext, ) @@ -80,10 +81,6 @@ from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLE_IMAGES -def maybe_deprecated(meth_name): - return pytest.deprecated_call() if meth_name == 'get_data' else nullcontext() - - class GenericImageAPI(ValidateAPI): """General image validation API""" @@ -194,7 +191,7 @@ def validate_no_slicing(self, imaker, params): @expires('5.0.0') def validate_get_data_deprecated(self, imaker, params): img = imaker() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): data = img.get_data() assert_array_equal(np.asanyarray(img.dataobj), data) @@ -246,14 +243,12 @@ def validate_data_interface(self, imaker, params): self._check_array_interface(imaker, meth_name) method = getattr(img, meth_name) # Data shape is same as image shape - with maybe_deprecated(meth_name): - assert img.shape == method().shape + assert img.shape == method().shape # Data ndim is same as image ndim - with maybe_deprecated(meth_name): - assert img.ndim == method().ndim + assert img.ndim == method().ndim # Values to get_data caching parameter must be 'fill' or # 'unchanged' - with maybe_deprecated(meth_name), pytest.raises(ValueError): + with pytest.raises(ValueError): method(caching='something') # dataobj is read only fake_data = np.zeros(img.shape, dtype=img.get_data_dtype()) @@ -277,13 +272,11 @@ def _check_proxy_interface(self, imaker, meth_name): assert not img.in_memory # Load with caching='unchanged' method = getattr(img, meth_name) - with maybe_deprecated(meth_name): - data = method(caching='unchanged') + data = method(caching='unchanged') # Still not cached assert not img.in_memory # Default load, does caching - with maybe_deprecated(meth_name): - data = method() + data = method() # Data now cached. in_memory is True if either of the get_data # or get_fdata caches are not-None assert img.in_memory @@ -295,36 +288,30 @@ def _check_proxy_interface(self, imaker, meth_name): # integers, but lets assume that's not true here. 
assert_array_equal(proxy_data, data) # Now caching='unchanged' does nothing, returns cached version - with maybe_deprecated(meth_name): - data_again = method(caching='unchanged') + data_again = method(caching='unchanged') assert data is data_again # caching='fill' does nothing because the cache is already full - with maybe_deprecated(meth_name): - data_yet_again = method(caching='fill') + data_yet_again = method(caching='fill') assert data is data_yet_again # changing array data does not change proxy data, or reloaded # data data[:] = 42 assert_array_equal(proxy_data, proxy_copy) assert_array_equal(np.asarray(img.dataobj), proxy_copy) - # It does change the result of get_data - with maybe_deprecated(meth_name): - assert_array_equal(method(), 42) + # It does change the result of get_fdata + assert_array_equal(method(), 42) # until we uncache img.uncache() # Which unsets in_memory assert not img.in_memory - with maybe_deprecated(meth_name): - assert_array_equal(method(), proxy_copy) + assert_array_equal(method(), proxy_copy) # Check caching='fill' does cache data img = imaker() method = getattr(img, meth_name) assert not img.in_memory - with maybe_deprecated(meth_name): - data = method(caching='fill') + data = method(caching='fill') assert img.in_memory - with maybe_deprecated(meth_name): - data_again = method() + data_again = method() assert data is data_again # Check that caching refreshes for new floating point type. img.uncache() @@ -368,8 +355,7 @@ def _check_array_caching(self, imaker, meth_name, caching): get_data_func = method if caching is None else partial(method, caching=caching) assert isinstance(img.dataobj, np.ndarray) assert img.in_memory - with maybe_deprecated(meth_name): - data = get_data_func() + data = get_data_func() # Returned data same object as underlying dataobj if using # old ``get_data`` method, or using newer ``get_fdata`` # method, where original array was float64. @@ -377,8 +363,7 @@ def _check_array_caching(self, imaker, meth_name, caching): dataobj_is_data = arr_dtype == np.float64 or method == img.get_data # Set something to the output array. data[:] = 42 - with maybe_deprecated(meth_name): - get_result_changed = np.all(get_data_func() == 42) + get_result_changed = np.all(get_data_func() == 42) assert get_result_changed == (dataobj_is_data or caching != 'unchanged') if dataobj_is_data: assert data is img.dataobj @@ -387,15 +372,13 @@ def _check_array_caching(self, imaker, meth_name, caching): assert_array_equal(np.asarray(img.dataobj), 42) # Uncache has no effect img.uncache() - with maybe_deprecated(meth_name): - assert_array_equal(get_data_func(), 42) + assert_array_equal(get_data_func(), 42) else: assert not data is img.dataobj assert not np.all(np.asarray(img.dataobj) == 42) # Uncache does have an effect img.uncache() - with maybe_deprecated(meth_name): - assert not np.all(get_data_func() == 42) + assert not np.all(get_data_func() == 42) # in_memory is always true for array images, regardless of # cache state. 
img.uncache() @@ -408,8 +391,7 @@ def _check_array_caching(self, imaker, meth_name, caching): if arr_dtype not in float_types: return for float_type in float_types: - with maybe_deprecated(meth_name): - data = get_data_func(dtype=float_type) + data = get_data_func(dtype=float_type) assert (data is img.dataobj) == (arr_dtype == float_type) def validate_shape(self, imaker, params): diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 962a2433bf..706a87f10f 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -40,7 +40,7 @@ from .. import spm99analyze as spm99 from ..optpkg import optional_package from ..spatialimages import SpatialImage -from ..testing import expires +from ..testing import deprecated_to, expires from ..tmpdirs import InTemporaryDirectory from ..volumeutils import native_code, swapped_code @@ -285,7 +285,7 @@ def test_filename_save(): @expires('5.0.0') def test_guessed_image_type(): # Test whether we can guess the image type from example files - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert nils.guessed_image_type(pjoin(DATA_PATH, 'example4d.nii.gz')) == Nifti1Image assert nils.guessed_image_type(pjoin(DATA_PATH, 'nifti1.hdr')) == Nifti1Pair assert nils.guessed_image_type(pjoin(DATA_PATH, 'example_nifti2.nii.gz')) == Nifti2Image diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 4071b09f72..401ed04535 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -21,7 +21,7 @@ from ..loadsave import _signature_matches_extension, load, read_img_data from ..openers import Opener from ..optpkg import optional_package -from ..testing import expires +from ..testing import deprecated_to, expires from ..tmpdirs import InTemporaryDirectory _, have_scipy, _ = optional_package('scipy') @@ -50,14 +50,14 @@ def test_read_img_data(): fpath = pathlib.Path(fpath) img = load(fpath) data = img.get_fdata() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): data2 = read_img_data(img) assert_array_equal(data, data2) # These examples have null scaling - assert prefer=unscaled is the same dao = img.dataobj if hasattr(dao, 'slope') and hasattr(img.header, 'raw_data_from_fileobj'): assert (dao.slope, dao.inter) == (1, 0) - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(read_img_data(img, prefer='unscaled'), data) # Assert all caps filename works as well with TemporaryDirectory() as tmpdir: @@ -140,21 +140,21 @@ def test_read_img_data_nifti(): img = img_class(data, np.eye(4)) img.set_data_dtype(out_dtype) # No filemap => error - with pytest.deprecated_call(), pytest.raises(ImageFileError): + with deprecated_to('5.0.0'), pytest.raises(ImageFileError): read_img_data(img) # Make a filemap froot = f'an_image_{i}' img.file_map = img.filespec_to_file_map(froot) # Trying to read from this filemap will generate an error because # we are going to read from files that do not exist - with pytest.deprecated_call(), pytest.raises(OSError): + with deprecated_to('5.0.0'), pytest.raises(OSError): read_img_data(img) img.to_file_map() # Load - now the scaling and offset correctly applied img_fname = img.file_map['image'].filename img_back = load(img_fname) data_back = img_back.get_fdata() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(data_back, read_img_data(img_back)) # This is the same as if we loaded the image and header separately hdr_fname = 
img.file_map['header'].filename if 'header' in img.file_map else img_fname @@ -166,16 +166,16 @@ def test_read_img_data_nifti(): # Unscaled is the same as returned from raw_data_from_fileobj with open(img_fname, 'rb') as fobj: unscaled_back = hdr_back.raw_data_from_fileobj(fobj) - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(unscaled_back, read_img_data(img_back, prefer='unscaled')) # If we futz with the scaling in the header, the result changes - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(data_back, read_img_data(img_back)) has_inter = hdr_back.has_data_intercept old_slope = hdr_back['scl_slope'] old_inter = hdr_back['scl_inter'] if has_inter else 0 est_unscaled = (data_back - old_inter) / old_slope - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): actual_unscaled = read_img_data(img_back, prefer='unscaled') assert_almost_equal(est_unscaled, actual_unscaled) img_back.header['scl_slope'] = 2.1 @@ -185,10 +185,10 @@ def test_read_img_data_nifti(): else: new_inter = 0 # scaled scaling comes from new parameters in header - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert np.allclose(actual_unscaled * 2.1 + new_inter, read_img_data(img_back)) # Unscaled array didn't change - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(actual_unscaled, read_img_data(img_back, prefer='unscaled')) # Check the offset too img.header.set_data_offset(1024) @@ -200,14 +200,14 @@ def test_read_img_data_nifti(): fobj.write(b'\x00\x00') img_back = load(img_fname) data_back = img_back.get_fdata() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(data_back, read_img_data(img_back)) img_back.header.set_data_offset(1026) # Check we pick up new offset exp_offset = np.zeros((data.size,), data.dtype) + old_inter exp_offset[:-1] = np.ravel(data_back, order='F')[1:] exp_offset = np.reshape(exp_offset, shape, order='F') - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(exp_offset, read_img_data(img_back)) # Delete stuff that might hold onto file references del img, img_back, data_back diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index 426702fa43..b22a4ef3ec 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -1,12 +1,12 @@ import pytest from nibabel.onetime import auto_attr, setattr_on_read -from nibabel.testing import expires +from nibabel.testing import deprecated_to, expires @expires('5.0.0') def test_setattr_on_read(): - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): class MagicProp: @setattr_on_read diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 8821fac0e0..0094711e79 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -26,7 +26,7 @@ ornt2axcodes, ornt_transform, ) -from ..testing import expires +from ..testing import deprecated_to, expires IN_ARRS = [ np.eye(4), @@ -407,6 +407,6 @@ def test_inv_ornt_aff(): def test_flip_axis_deprecation(): a = np.arange(24).reshape((2, 3, 4)) axis = 1 - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): a_flipped = flip_axis(a, axis) assert_array_equal(a_flipped, np.flip(a, axis)) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 5cad23a22f..7157d5c459 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -18,7 +18,7 @@ from .. 
import load as top_load from ..imageclasses import spatial_axes_first from ..spatialimages import HeaderDataError, SpatialHeader, SpatialImage -from ..testing import bytesio_round_trip, expires, memmap_after_ufunc +from ..testing import bytesio_round_trip, deprecated_to, expires, memmap_after_ufunc from ..tmpdirs import InTemporaryDirectory @@ -368,7 +368,7 @@ def test_get_data(self): in_data = in_data_template.copy() img = img_klass(in_data, None) assert in_data is img.dataobj - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): out_data = img.get_data() assert in_data is out_data # and that uncache has no effect @@ -381,18 +381,18 @@ def test_get_data(self): rt_img = bytesio_round_trip(img) assert in_data is not rt_img.dataobj assert (rt_img.dataobj == in_data).all() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): out_data = rt_img.get_data() assert (out_data == in_data).all() assert rt_img.dataobj is not out_data # cache - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert rt_img.get_data() is out_data out_data[:] = 42 rt_img.uncache() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert rt_img.get_data() is not out_data - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert (rt_img.get_data() == in_data).all() def test_slicer(self): From 511ca0b4e53e1b51c5dc24c6226739862183f559 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 22 Feb 2024 21:37:41 -0500 Subject: [PATCH 502/702] TYP: Update ignore comments --- nibabel/_compression.py | 2 +- nibabel/benchmarks/bench_arrayproxy_slicing.py | 2 +- nibabel/cmdline/dicomfs.py | 2 +- nibabel/minc2.py | 2 +- nibabel/spm99analyze.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nibabel/_compression.py b/nibabel/_compression.py index 75a5e3bbf4..b7cfc8f49f 100644 --- a/nibabel/_compression.py +++ b/nibabel/_compression.py @@ -17,7 +17,7 @@ from .optpkg import optional_package if ty.TYPE_CHECKING: # pragma: no cover - import indexed_gzip # type: ignore[import-not-found] + import indexed_gzip # type: ignore[import] import pyzstd HAVE_INDEXED_GZIP = True diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index dc9acfdedd..305c5215e4 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -26,7 +26,7 @@ # if memory_profiler is installed, we get memory usage results try: - from memory_profiler import memory_usage # type: ignore[import-not-found] + from memory_profiler import memory_usage # type: ignore[import] except ImportError: memory_usage = None diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index dec4011c51..66ffb8adea 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -25,7 +25,7 @@ class dummy_fuse: try: - import fuse # type: ignore[import-not-found] + import fuse # type: ignore[import] uid = os.getuid() gid = os.getgid() diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 94e1be76e2..912b5d28ae 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -163,7 +163,7 @@ class Minc2Image(Minc1Image): def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # Import of h5py might take awhile for MPI-enabled builds # So we are importing it here "on demand" - import h5py # type: ignore[import-not-found] + import h5py # type: ignore[import] holder = file_map['image'] if holder.filename is None: diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 
c859d702f4..3465c57190 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -275,7 +275,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): contents = matf.read() if len(contents) == 0: return ret - import scipy.io as sio # type: ignore[import-not-found] + import scipy.io as sio # type: ignore[import] mats = sio.loadmat(BytesIO(contents)) if 'mat' in mats: # this overrides a 'M', and includes any flip From cff293645aa71361882ac4e300a124790d5d6f19 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 22 Feb 2024 22:26:03 -0500 Subject: [PATCH 503/702] TEST: Prepare tests to fail at 6.0 --- nibabel/gifti/tests/test_gifti.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index a2f8395cae..5cc2756c60 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -14,7 +14,7 @@ from ... import load from ...fileholders import FileHolder from ...nifti1 import data_type_codes -from ...testing import get_test_data +from ...testing import deprecated_to, expires, get_test_data from .. import ( GiftiCoordSystem, GiftiDataArray, @@ -275,27 +275,29 @@ def test_labeltable(): assert len(img.labeltable.labels) == 2 +@expires('6.0.0') def test_metadata(): md = GiftiMetaData(key='value') # Old initialization methods - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0.0'): nvpair = GiftiNVPairs('key', 'value') with pytest.warns(FutureWarning) as w: md2 = GiftiMetaData(nvpair=nvpair) assert len(w) == 1 - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0.0'): md3 = GiftiMetaData.from_dict({'key': 'value'}) assert md == md2 == md3 == {'key': 'value'} # .data as a list of NVPairs is going away - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0.0'): assert md.data[0].name == 'key' + with deprecated_to('6.0.0'): assert md.data[0].value == 'value' - assert len(w) == 2 +@expires('6.0.0') def test_metadata_list_interface(): md = GiftiMetaData(key='value') - with pytest.warns(DeprecationWarning): + with deprecated_to('6.0.0'): mdlist = md.data assert len(mdlist) == 1 assert mdlist[0].name == 'key' @@ -312,7 +314,7 @@ def test_metadata_list_interface(): assert md['foo'] == 'bar' # Append new NVPair - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0.0'): nvpair = GiftiNVPairs('key', 'value') mdlist.append(nvpair) assert len(mdlist) == 2 From 4b65364e6f255ab5a574c532a1b751265a8b48b1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 8 Feb 2024 08:58:57 -0500 Subject: [PATCH 504/702] DATA: Add dcm_qa_xa30 as submodule for test data --- .gitmodules | 3 +++ nibabel-data/dcm_qa_xa30 | 1 + 2 files changed, 4 insertions(+) create mode 160000 nibabel-data/dcm_qa_xa30 diff --git a/.gitmodules b/.gitmodules index cdcef650f1..20e97c2ebb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -19,3 +19,6 @@ [submodule "nibabel-data/nitest-dicom"] path = nibabel-data/nitest-dicom url = https://github.com/effigies/nitest-dicom +[submodule "nibabel-data/dcm_qa_xa30"] + path = nibabel-data/dcm_qa_xa30 + url = https://github.com/neurolabusc/dcm_qa_xa30.git diff --git a/nibabel-data/dcm_qa_xa30 b/nibabel-data/dcm_qa_xa30 new file mode 160000 index 0000000000..89b2509218 --- /dev/null +++ b/nibabel-data/dcm_qa_xa30 @@ -0,0 +1 @@ +Subproject commit 89b2509218a6dd021c5d40ddaf2a017ac1bacafc From bc227ec4658f9f28e54e6861694ca14e97b229c1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: 
Thu, 22 Feb 2024 16:49:56 -0500 Subject: [PATCH 505/702] TEST: Add test for Siemens TRACE volume --- nibabel/nicom/tests/test_dicomwrappers.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 083357537e..5c29349362 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -35,6 +35,11 @@ DATA_FILE_EMPTY_ST = pjoin(IO_DATA_PATH, 'slicethickness_empty_string.dcm') DATA_FILE_4D_DERIVED = pjoin(get_nibabel_data(), 'nitest-dicom', '4d_multiframe_with_derived.dcm') DATA_FILE_CT = pjoin(get_nibabel_data(), 'nitest-dicom', 'siemens_ct_header_csa.dcm') +DATA_FILE_SIEMENS_TRACE = pjoin( + get_nibabel_data(), + 'dcm_qa_xa30', + 'In/20_DWI_dir80_AP/0001_1.3.12.2.1107.5.2.43.67093.2022071112140611403312307.dcm', +) # This affine from our converted image was shown to match our image spatially # with an image from SPM DICOM conversion. We checked the matching with SPM @@ -656,6 +661,13 @@ def test_data_derived_shape(self): with pytest.warns(UserWarning, match='Derived images found and removed'): assert dw.image_shape == (96, 96, 60, 33) + @dicom_test + @needs_nibabel_data('dcm_qa_xa30') + def test_data_trace(self): + # Test that a standalone trace volume is found and not dropped + dw = didw.wrapper_from_file(DATA_FILE_SIEMENS_TRACE) + assert dw.image_shape == (72, 72, 39, 1) + @dicom_test @needs_nibabel_data('nitest-dicom') def test_data_unreadable_private_headers(self): From 3f81a96b61106d218da51c0453de23c4e6669bf6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 22 Feb 2024 16:50:05 -0500 Subject: [PATCH 506/702] FIX: Conditionally drop isotropic frames --- nibabel/nicom/dicomwrappers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 42d4b1413f..5ff4f33052 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -509,11 +509,14 @@ def image_shape(self): if hasattr(first_frame, 'get') and first_frame.get([0x18, 0x9117]): # DWI image may include derived isotropic, ADC or trace volume try: - self.frames = pydicom.Sequence( + anisotropic = pydicom.Sequence( frame for frame in self.frames if frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC' ) + # Image contains DWI volumes followed by derived images; remove derived images + if len(anisotropic) != 0: + self.frames = anisotropic except IndexError: # Sequence tag is found but missing items! raise WrapperError('Diffusion file missing information') From 79792de0bff76d0a98781c3910b31d6cda6f21d0 Mon Sep 17 00:00:00 2001 From: manifest-rules Date: Fri, 23 Feb 2024 09:57:36 +0000 Subject: [PATCH 507/702] TEST: Unit test for loading ASCII-encoded "flat" GIFTI data array. 
Currently failing

---
 nibabel/gifti/tests/data/ascii_flat_data.gii | 76 ++++++++++++++++++++
 nibabel/gifti/tests/test_parse_gifti_fast.py | 15 +++-
 2 files changed, 89 insertions(+), 2 deletions(-)
 create mode 100644 nibabel/gifti/tests/data/ascii_flat_data.gii

diff --git a/nibabel/gifti/tests/data/ascii_flat_data.gii b/nibabel/gifti/tests/data/ascii_flat_data.gii
new file mode 100644
index 0000000000..26a73fba02
--- /dev/null
+++ b/nibabel/gifti/tests/data/ascii_flat_data.gii
@@ -0,0 +1,76 @@
+[76-line GIFTI test file; the XML markup was lost to tag-stripping during
+ extraction. The file declares two ASCII-encoded DataArrays (a 10x3 pointset
+ and a 10x3 triangle array) whose values are stored as flat text. The
+ surviving data lines are:]
+ 1.000000 0.000000 0.000000 0.000000 0.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000000
+ 155.17539978 135.58103943 98.30715179 140.33973694 190.0491333 73.24776459 157.3598938 196.97969055 83.65809631 171.46174622 137.43661499 78.4709549 148.54592896 97.06752777 65.96373749 123.45701599 111.46841431 66.3571167 135.30892944 202.28720093 36.38148499 178.28155518 162.59469604 37.75128937 178.11087036 115.28820038 57.17986679 142.81582642 82.82115173 31.02205276
+ 6402 17923 25602 14085 25602 17923 25602 14085 4483 17923 1602 14085 4483 25603 25602 25604 25602 25603 25602 25604 6402 25603 3525 25604 1123 17922 12168 25604 12168 17922

diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py
index f08bdd1b17..49f2729f37 100644
--- a/nibabel/gifti/tests/test_parse_gifti_fast.py
+++ b/nibabel/gifti/tests/test_parse_gifti_fast.py
@@ -39,9 +39,10 @@
 DATA_FILE5 = pjoin(IO_DATA_PATH, 'base64bin.gii')
 DATA_FILE6 = pjoin(IO_DATA_PATH, 'rh.aparc.annot.gii')
 DATA_FILE7 = pjoin(IO_DATA_PATH, 'external.gii')
+DATA_FILE8 = pjoin(IO_DATA_PATH, 'ascii_flat_data.gii')

-datafiles = [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6, DATA_FILE7]
-numDA = [2, 1, 1, 1, 2, 1, 2]
+datafiles = [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6, DATA_FILE7, DATA_FILE8]
+numDA = [2, 1, 1, 1, 2, 1, 2, 2]

 DATA_FILE1_darr1 = np.array(
     [
@@ -152,6 +153,10 @@
     dtype=np.int32,
 )

+DATA_FILE8_darr1 = np.copy(DATA_FILE5_darr1)
+
+DATA_FILE8_darr2 = np.copy(DATA_FILE5_darr2)
+

 def assert_default_types(loaded):
     default = loaded.__class__()
@@ -448,3 +453,9 @@ def test_load_compressed():
     img7 = load(fn)
     assert_array_almost_equal(img7.darrays[0].data, DATA_FILE7_darr1)
     assert_array_almost_equal(img7.darrays[1].data, DATA_FILE7_darr2)
+
+
+def test_load_flat_ascii_data():
+    img = load(DATA_FILE8)
+    assert_array_almost_equal(img.darrays[0].data, DATA_FILE8_darr1)
+    assert_array_almost_equal(img.darrays[1].data, DATA_FILE8_darr2)

From 6ffeeacc158c51111691e91fbb2fbbc303f42cd8 Mon Sep 17 00:00:00 2001
From: manifest-rules
Date: Fri, 23 Feb 2024 10:08:14 +0000
Subject: [PATCH 508/702] RF: Make sure that ASCII-encoded DataArrays are
 returned with expected shape

---
 nibabel/gifti/parse_gifti_fast.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py
index 7d8eacb825..af01dd544b 100644
--- a/nibabel/gifti/parse_gifti_fast.py
+++ b/nibabel/gifti/parse_gifti_fast.py
@@ -74,6 +74,10 @@ def read_data_block(darray, fname, data, mmap):
         # GIFTI_ENCODING_ASCII
         c = StringIO(data)
         da = np.loadtxt(c, dtype=dtype)
+        # Reshape to dims specified in GiftiDataArray attributes, but preserve
+        # existing behaviour of loading as 1D for arrays with a dimension of
+        # length 1
+        da = da.reshape(darray.dims).squeeze()
         return da  # independent of the
endianness elif enclabel not in ('B64BIN', 'B64GZ', 'External'): return 0 From b46c82946d6bd88b73164904834567b12aadf935 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 23 Feb 2024 10:05:56 -0500 Subject: [PATCH 509/702] RF: Consistently apply data type, shape and index order in GIFTI data blocks --- nibabel/gifti/parse_gifti_fast.py | 70 +++++++++++++------------------ 1 file changed, 29 insertions(+), 41 deletions(-) diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index af01dd544b..ccd608324a 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -68,21 +68,21 @@ def read_data_block(darray, fname, data, mmap): if mmap is True: mmap = 'c' enclabel = gifti_encoding_codes.label[darray.encoding] - dtype = data_type_codes.type[darray.datatype] + if enclabel not in ('ASCII', 'B64BIN', 'B64GZ', 'External'): + raise GiftiParseError(f'Unknown encoding {darray.encoding}') + + # Encode the endianness in the dtype + byteorder = gifti_endian_codes.byteorder[darray.endian] + dtype = data_type_codes.dtype[darray.datatype].newbyteorder(byteorder) + + shape = tuple(darray.dims) + order = array_index_order_codes.npcode[darray.ind_ord] + + # GIFTI_ENCODING_ASCII if enclabel == 'ASCII': - # GIFTI_ENCODING_ASCII - c = StringIO(data) - da = np.loadtxt(c, dtype=dtype) - # Reshape to dims specified in GiftiDataArray attributes, but preserve - # existing behaviour of loading as 1D for arrays with a dimension of - # length 1 - da = da.reshape(darray.dims).squeeze() - return da # independent of the endianness - elif enclabel not in ('B64BIN', 'B64GZ', 'External'): - return 0 - - # GIFTI_ENCODING_EXTBIN + return np.loadtxt(StringIO(data), dtype=dtype, ndmin=1).reshape(shape, order=order) + # We assume that the external data file is raw uncompressed binary, with # the data type/endianness/ordering specified by the other DataArray # attributes @@ -98,12 +98,13 @@ def read_data_block(darray, fname, data, mmap): newarr = None if mmap: try: - newarr = np.memmap( + return np.memmap( ext_fname, dtype=dtype, mode=mmap, offset=darray.ext_offset, - shape=tuple(darray.dims), + shape=shape, + order=order, ) # If the memmap fails, we ignore the error and load the data into # memory below @@ -111,13 +112,12 @@ def read_data_block(darray, fname, data, mmap): pass # mmap=False or np.memmap failed if newarr is None: - # We can replace this with a call to np.fromfile in numpy>=1.17, - # as an "offset" parameter was added in that version. - with open(ext_fname, 'rb') as f: - f.seek(darray.ext_offset) - nbytes = np.prod(darray.dims) * dtype().itemsize - buff = f.read(nbytes) - newarr = np.frombuffer(buff, dtype=dtype) + return np.fromfile( + ext_fname, + dtype=dtype, + count=np.prod(darray.dims), + offset=darray.ext_offset, + ).reshape(shape, order=order) # Numpy arrays created from bytes objects are read-only. # Neither b64decode nor decompress will return bytearrays, and there @@ -125,26 +125,14 @@ def read_data_block(darray, fname, data, mmap): # there is not a simple way to avoid making copies. # If this becomes a problem, we should write a decoding interface with # a tunable chunk size. 
+ dec = base64.b64decode(data.encode('ascii')) + if enclabel == 'B64BIN': + buff = bytearray(dec) else: - dec = base64.b64decode(data.encode('ascii')) - if enclabel == 'B64BIN': - # GIFTI_ENCODING_B64BIN - buff = bytearray(dec) - else: - # GIFTI_ENCODING_B64GZ - buff = bytearray(zlib.decompress(dec)) - del dec - newarr = np.frombuffer(buff, dtype=dtype) - - sh = tuple(darray.dims) - if len(newarr.shape) != len(sh): - newarr = newarr.reshape(sh, order=array_index_order_codes.npcode[darray.ind_ord]) - - # check if we need to byteswap - required_byteorder = gifti_endian_codes.byteorder[darray.endian] - if required_byteorder in ('big', 'little') and required_byteorder != sys.byteorder: - newarr = newarr.byteswap() - return newarr + # GIFTI_ENCODING_B64GZ + buff = bytearray(zlib.decompress(dec)) + del dec + return np.frombuffer(buff, dtype=dtype).reshape(shape, order=order) def _str2int(in_str): From afbcc88d2c3ff83df3acadbff4741a790d2d5647 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 23 Feb 2024 10:08:22 -0500 Subject: [PATCH 510/702] TEST: Expect data arrays to be the advertised shapes --- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/tests/test_parse_gifti_fast.py | 13 +++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 76bad4677a..7aba877309 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -745,7 +745,7 @@ def agg_data(self, intent_code=None): >>> triangles_2 = surf_img.agg_data('triangle') >>> triangles_3 = surf_img.agg_data(1009) # Numeric code for pointset >>> print(np.array2string(triangles)) - [0 1 2] + [[0 1 2]] >>> np.array_equal(triangles, triangles_2) True >>> np.array_equal(triangles, triangles_3) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index 49f2729f37..f972425679 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -41,7 +41,16 @@ DATA_FILE7 = pjoin(IO_DATA_PATH, 'external.gii') DATA_FILE8 = pjoin(IO_DATA_PATH, 'ascii_flat_data.gii') -datafiles = [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6, DATA_FILE7, DATA_FILE8] +datafiles = [ + DATA_FILE1, + DATA_FILE2, + DATA_FILE3, + DATA_FILE4, + DATA_FILE5, + DATA_FILE6, + DATA_FILE7, + DATA_FILE8, +] numDA = [2, 1, 1, 1, 2, 1, 2, 2] DATA_FILE1_darr1 = np.array( @@ -51,7 +60,7 @@ [-17.614349, -65.401642, 21.071466], ] ) -DATA_FILE1_darr2 = np.array([0, 1, 2]) +DATA_FILE1_darr2 = np.array([[0, 1, 2]]) DATA_FILE2_darr1 = np.array( [ From 8cc8f05e98f2be2e7cf2b6c68636c97e47099aff Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 23 Feb 2024 10:29:43 -0500 Subject: [PATCH 511/702] CI: Configure dependabot to update official actions in bulk --- .github/dependabot.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..6c9e83fcbf --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" + groups: + actions-infrastructure: + patterns: + - "actions/*" From d641e44347caad6f52751b3d4f933cd11e8350d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Feb 2024 15:30:27 +0000 Subject: [PATCH 512/702] Build(deps): Bump the actions-infrastructure group with 3 updates Bumps the 
actions-infrastructure group with 3 updates: [actions/setup-python](https://github.com/actions/setup-python), [actions/upload-artifact](https://github.com/actions/upload-artifact) and [actions/download-artifact](https://github.com/actions/download-artifact). Updates `actions/setup-python` from 4 to 5 - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v4...v5) Updates `actions/upload-artifact` from 3 to 4 - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v3...v4) Updates `actions/download-artifact` from 3 to 4 - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major dependency-group: actions-infrastructure - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major dependency-group: actions-infrastructure - dependency-name: actions/download-artifact dependency-type: direct:production update-type: version-update:semver-major dependency-group: actions-infrastructure ... Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fc9afdc218..ac78e7c9cd 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -44,7 +44,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3 - run: pip install --upgrade build twine @@ -54,12 +54,12 @@ jobs: - name: Build git archive run: mkdir archive && git archive -v -o archive/nibabel-archive.tgz HEAD - name: Upload sdist and wheel artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: dist path: dist/ - name: Upload git archive artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: archive path: archive/ @@ -73,17 +73,17 @@ jobs: steps: - name: Download sdist and wheel artifacts if: matrix.package != 'archive' - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: dist path: dist/ - name: Download git archive artifact if: matrix.package == 'archive' - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: archive path: archive/ - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3 - name: Display Python version @@ -147,7 +147,7 @@ jobs: submodules: recursive fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -167,7 +167,7 @@ jobs: with: files: cov.xml - name: Upload pytest test results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} path: test-results.xml @@ -183,7 +183,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3 - name: Display Python version @@ -204,7 +204,7 @@ jobs: id-token: 
write if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: dist path: dist/ From d752aeb0160951527cef73d67123b16287aea5e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Feb 2024 15:30:31 +0000 Subject: [PATCH 513/702] Build(deps): Bump codecov/codecov-action from 3 to 4 Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 3 to 4. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v3...v4) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fc9afdc218..93ad63e177 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -162,7 +162,7 @@ jobs: run: tox c - name: Run tox run: tox -v --exit-and-dump-after 1200 - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 if: ${{ always() }} with: files: cov.xml From 6471a889dd9817ea671feacde882c77f20ecb895 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 23 Feb 2024 10:32:25 -0500 Subject: [PATCH 514/702] Update .github/workflows/test.yml --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 93ad63e177..520bd3d8a0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -166,6 +166,7 @@ jobs: if: ${{ always() }} with: files: cov.xml + token: ${{ secrets.CODECOV_TOKEN }} - name: Upload pytest test results uses: actions/upload-artifact@v3 with: From 42dea7a10842c03f4a1a70191b2091f2d7eee9f6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 23 Feb 2024 10:53:02 -0500 Subject: [PATCH 515/702] Update .github/workflows/test.yml --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ac78e7c9cd..d9d644b871 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -169,7 +169,7 @@ jobs: - name: Upload pytest test results uses: actions/upload-artifact@v4 with: - name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} + name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.dependencies }}-${{ matrix.architecture }} path: test-results.xml if: ${{ always() }} From 10ba536d973fb5f0f1bcc09ab568e3bca12dc6e7 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 09:36:36 -0500 Subject: [PATCH 516/702] Backport gh-1284: Add tool for generating GitHub-friendly release notes DOC: Fix references in changelog MNT: Add tool for generating GitHub-friendly release notes MNT: Avoid isort version with broken extras --- Changelog | 6 +-- tools/markdown_release_notes.py | 94 +++++++++++++++++++++++++++++++++ tox.ini | 5 +- 3 files changed, 100 insertions(+), 5 deletions(-) create mode 100644 tools/markdown_release_notes.py diff --git a/Changelog b/Changelog index 06cbf74fdf..cd3c2b005b 100644 --- a/Changelog +++ b/Changelog @@ -36,7 +36,7 @@ tested up to Python 3.12 and NumPy 1.26. 
New features ------------ * Add generic :class:`~nibabel.pointset.Pointset` and regularly spaced - :class:`~nibabel.pointset.NDGrid` data structures in preparation for coordinate + :class:`~nibabel.pointset.Grid` data structures in preparation for coordinate transformation and resampling (pr/1251) (CM, reviewed by Oscar Esteban) Enhancements @@ -44,7 +44,7 @@ Enhancements * Add :meth:`~nibabel.arrayproxy.ArrayProxy.copy` method to :class:`~nibabel.arrayproxy.ArrayProxy` (pr/1255) (CM, reviewed by Paul McCarthy) * Permit :meth:`~nibabel.xmlutils.XmlSerializable.to_xml` to pass keyword - arguments to :meth:`~xml.etree.ElementTree.ElementTree.tostring` (pr/1258) + arguments to :meth:`~xml.etree.ElementTree.tostring` (pr/1258) (CM) * Allow user expansion (e.g., ``~/...``) in strings passed to functions that accept paths (pr/1260) (Reinder Vos de Wael, reviewed by CM) @@ -54,7 +54,7 @@ Enhancements ``affine=None`` argument (pr/1253) (Blake Dewey, reviewed by CM) * Warn on invalid MINC2 spacing declarations, treat as missing (pr/1237) (Peter Suter, reviewed by CM) -* Refactor :func:`~nibabel.nicom.utils.find_private_element` for improved +* Refactor :func:`~nibabel.nicom.utils.find_private_section` for improved readability and maintainability (pr/1228) (MB, reviewed by CM) Bug fixes diff --git a/tools/markdown_release_notes.py b/tools/markdown_release_notes.py new file mode 100644 index 0000000000..66e7876036 --- /dev/null +++ b/tools/markdown_release_notes.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +import re +import sys +from pathlib import Path + +CHANGELOG = Path(__file__).parent.parent / 'Changelog' + +# Match release lines like "5.2.0 (Monday 11 December 2023)" +RELEASE_REGEX = re.compile(r"""((?:\d+)\.(?:\d+)\.(?:\d+)) \(\w+ \d{1,2} \w+ \d{4}\)$""") + + +def main(): + version = sys.argv[1] + output = sys.argv[2] + if output == '-': + output = sys.stdout + else: + output = open(output, 'w') + + release_notes = [] + in_release_notes = False + + with open(CHANGELOG) as f: + for line in f: + match = RELEASE_REGEX.match(line) + if match: + if in_release_notes: + break + in_release_notes = match.group(1) == version + next(f) # Skip the underline + continue + + if in_release_notes: + release_notes.append(line) + + # Drop empty lines at start and end + while release_notes and not release_notes[0].strip(): + release_notes.pop(0) + while release_notes and not release_notes[-1].strip(): + release_notes.pop() + + # Join lines + release_notes = ''.join(release_notes) + + # Remove line breaks when they are followed by a space + release_notes = re.sub(r'\n +', ' ', release_notes) + + # Replace pr/ with # for GitHub + release_notes = re.sub(r'\(pr/(\d+)\)', r'(#\1)', release_notes) + + # Replace :mod:`package.X` with [package.X](...) + release_notes = re.sub( + r':mod:`nibabel\.(.*)`', + r'[nibabel.\1](https://nipy.org/nibabel/reference/nibabel.\1.html)', + release_notes, + ) + # Replace :class/func/attr:`package.module.X` with [package.module.X](...) + release_notes = re.sub( + r':(?:class|func|attr):`(nibabel\.\w*)(\.[\w.]*)?\.(\w+)`', + r'[\1\2.\3](https://nipy.org/nibabel/reference/\1.html#\1\2.\3)', + release_notes, + ) + release_notes = re.sub( + r':(?:class|func|attr):`~(nibabel\.\w*)(\.[\w.]*)?\.(\w+)`', + r'[\3](https://nipy.org/nibabel/reference/\1.html#\1\2.\3)', + release_notes, + ) + # Replace :meth:`package.module.class.X` with [package.module.class.X](...) 
+ release_notes = re.sub( + r':meth:`(nibabel\.[\w.]*)\.(\w+)\.(\w+)`', + r'[\1.\2.\3](https://nipy.org/nibabel/reference/\1.html#\1.\2.\3)', + release_notes, + ) + release_notes = re.sub( + r':meth:`~(nibabel\.[\w.]*)\.(\w+)\.(\w+)`', + r'[\3](https://nipy.org/nibabel/reference/\1.html#\1.\2.\3)', + release_notes, + ) + + def python_doc(match): + module = match.group(1) + name = match.group(2) + return f'[{name}](https://docs.python.org/3/library/{module.lower()}.html#{module}.{name})' + + release_notes = re.sub(r':meth:`~([\w.]+)\.(\w+)`', python_doc, release_notes) + + output.write('## Release notes\n\n') + output.write(release_notes) + + output.close() + + +if __name__ == '__main__': + main() diff --git a/tox.ini b/tox.ini index d91c136fc1..cc2b263cb1 100644 --- a/tox.ini +++ b/tox.ini @@ -141,7 +141,8 @@ labels = check deps = flake8 blue - isort[colors] + # Broken extras, remove when fix is released + isort[colors]!=5.13.1 skip_install = true commands = blue --check --diff --color nibabel @@ -153,7 +154,7 @@ description = Auto-apply style guide to the extent possible labels = pre-release deps = blue - isort[colors] + isort skip_install = true commands = blue nibabel From c74794eb3e2b77f79904b7a2fe6f148c0ead47f1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 10:54:07 -0500 Subject: [PATCH 517/702] DOC: Fix intersphinx mapping and reference type --- Changelog | 4 ++-- doc/source/conf.py | 7 ++++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Changelog b/Changelog index cd3c2b005b..10afc42df8 100644 --- a/Changelog +++ b/Changelog @@ -43,8 +43,8 @@ Enhancements ------------ * Add :meth:`~nibabel.arrayproxy.ArrayProxy.copy` method to :class:`~nibabel.arrayproxy.ArrayProxy` (pr/1255) (CM, reviewed by Paul McCarthy) -* Permit :meth:`~nibabel.xmlutils.XmlSerializable.to_xml` to pass keyword - arguments to :meth:`~xml.etree.ElementTree.tostring` (pr/1258) +* Permit :meth:`~nibabel.xmlutils.XmlSerializable.to_xml` methods to pass keyword + arguments to :func:`xml.etree.ElementTree.tostring` (pr/1258) (CM) * Allow user expansion (e.g., ``~/...``) in strings passed to functions that accept paths (pr/1260) (Reinder Vos de Wael, reviewed by CM) diff --git a/doc/source/conf.py b/doc/source/conf.py index 82fe25adac..175c6340bd 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -280,7 +280,12 @@ # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'https://docs.python.org/3/': None} +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable', None), + 'scipy': ('https://docs.scipy.org/doc/scipy', None), + 'matplotlib': ('https://matplotlib.org/stable', None), +} # Config of plot_directive plot_include_source = True From 20d51ddedd223c292401914e2758168eabdee9c0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 11:13:10 -0500 Subject: [PATCH 518/702] MNT: Advertise Python 3.12 support --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 50905dff56..9fec3975cc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering", ] # Version from setuptools_scm From 6a2e30c94b2b0df6e5238daba27d6b0edbfe94d5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 12 Dec 2023 21:55:20 -0500 Subject: [PATCH 519/702] Backport gh-1286: Tolerate missing git --- nibabel/pkg_info.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 7e816939d5..7232806a0a 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -1,6 +1,7 @@ from __future__ import annotations import sys +from contextlib import suppress from subprocess import run from packaging.version import Version @@ -102,14 +103,16 @@ def pkg_commit_hash(pkg_path: str | None = None) -> tuple[str, str]: ver = Version(__version__) if ver.local is not None and ver.local.startswith('g'): return 'installation', ver.local[1:8] - # maybe we are in a repository - proc = run( - ('git', 'rev-parse', '--short', 'HEAD'), - capture_output=True, - cwd=pkg_path, - ) - if proc.stdout: - return 'repository', proc.stdout.decode().strip() + # maybe we are in a repository, but consider that we may not have git + with suppress(FileNotFoundError): + proc = run( + ('git', 'rev-parse', '--short', 'HEAD'), + capture_output=True, + cwd=pkg_path, + ) + if proc.stdout: + return 'repository', proc.stdout.decode().strip() + return '(none found)', '' From 83613abe7b17e80ab7ffe8b168f3b44a7f52ff20 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 22 Feb 2024 21:35:21 -0500 Subject: [PATCH 520/702] Backport gh-1297: Accommodate pytest 8 changes --- nibabel/testing/__init__.py | 12 ++++++ nibabel/tests/test_image_api.py | 56 +++++++++------------------ nibabel/tests/test_image_load_save.py | 4 +- nibabel/tests/test_loadsave.py | 26 ++++++------- nibabel/tests/test_onetime.py | 4 +- nibabel/tests/test_orientations.py | 4 +- nibabel/tests/test_spatialimages.py | 12 +++--- 7 files changed, 56 insertions(+), 62 deletions(-) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 5baa5e2b86..21ecadf841 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -233,3 +233,15 @@ def expires(version): return lambda x: x return pytest.mark.xfail(raises=ExpiredDeprecationError) + + +def deprecated_to(version): + """Context manager to expect DeprecationWarnings until a given version""" + from packaging.version import Version + + from nibabel import __version__ as nbver + + if Version(nbver) < Version(version): + return pytest.deprecated_call() + + return nullcontext() diff --git a/nibabel/tests/test_image_api.py 
b/nibabel/tests/test_image_api.py index f1fc720716..86c04985f8 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -48,6 +48,7 @@ bytesio_filemap, bytesio_round_trip, clear_and_catch_warnings, + deprecated_to, expires, nullcontext, ) @@ -80,10 +81,6 @@ from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLE_IMAGES -def maybe_deprecated(meth_name): - return pytest.deprecated_call() if meth_name == 'get_data' else nullcontext() - - class GenericImageAPI(ValidateAPI): """General image validation API""" @@ -194,7 +191,7 @@ def validate_no_slicing(self, imaker, params): @expires('5.0.0') def validate_get_data_deprecated(self, imaker, params): img = imaker() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): data = img.get_data() assert_array_equal(np.asanyarray(img.dataobj), data) @@ -246,14 +243,12 @@ def validate_data_interface(self, imaker, params): self._check_array_interface(imaker, meth_name) method = getattr(img, meth_name) # Data shape is same as image shape - with maybe_deprecated(meth_name): - assert img.shape == method().shape + assert img.shape == method().shape # Data ndim is same as image ndim - with maybe_deprecated(meth_name): - assert img.ndim == method().ndim + assert img.ndim == method().ndim # Values to get_data caching parameter must be 'fill' or # 'unchanged' - with maybe_deprecated(meth_name), pytest.raises(ValueError): + with pytest.raises(ValueError): method(caching='something') # dataobj is read only fake_data = np.zeros(img.shape, dtype=img.get_data_dtype()) @@ -277,13 +272,11 @@ def _check_proxy_interface(self, imaker, meth_name): assert not img.in_memory # Load with caching='unchanged' method = getattr(img, meth_name) - with maybe_deprecated(meth_name): - data = method(caching='unchanged') + data = method(caching='unchanged') # Still not cached assert not img.in_memory # Default load, does caching - with maybe_deprecated(meth_name): - data = method() + data = method() # Data now cached. in_memory is True if either of the get_data # or get_fdata caches are not-None assert img.in_memory @@ -295,36 +288,30 @@ def _check_proxy_interface(self, imaker, meth_name): # integers, but lets assume that's not true here. 
assert_array_equal(proxy_data, data) # Now caching='unchanged' does nothing, returns cached version - with maybe_deprecated(meth_name): - data_again = method(caching='unchanged') + data_again = method(caching='unchanged') assert data is data_again # caching='fill' does nothing because the cache is already full - with maybe_deprecated(meth_name): - data_yet_again = method(caching='fill') + data_yet_again = method(caching='fill') assert data is data_yet_again # changing array data does not change proxy data, or reloaded # data data[:] = 42 assert_array_equal(proxy_data, proxy_copy) assert_array_equal(np.asarray(img.dataobj), proxy_copy) - # It does change the result of get_data - with maybe_deprecated(meth_name): - assert_array_equal(method(), 42) + # It does change the result of get_fdata + assert_array_equal(method(), 42) # until we uncache img.uncache() # Which unsets in_memory assert not img.in_memory - with maybe_deprecated(meth_name): - assert_array_equal(method(), proxy_copy) + assert_array_equal(method(), proxy_copy) # Check caching='fill' does cache data img = imaker() method = getattr(img, meth_name) assert not img.in_memory - with maybe_deprecated(meth_name): - data = method(caching='fill') + data = method(caching='fill') assert img.in_memory - with maybe_deprecated(meth_name): - data_again = method() + data_again = method() assert data is data_again # Check that caching refreshes for new floating point type. img.uncache() @@ -368,8 +355,7 @@ def _check_array_caching(self, imaker, meth_name, caching): get_data_func = method if caching is None else partial(method, caching=caching) assert isinstance(img.dataobj, np.ndarray) assert img.in_memory - with maybe_deprecated(meth_name): - data = get_data_func() + data = get_data_func() # Returned data same object as underlying dataobj if using # old ``get_data`` method, or using newer ``get_fdata`` # method, where original array was float64. @@ -377,8 +363,7 @@ def _check_array_caching(self, imaker, meth_name, caching): dataobj_is_data = arr_dtype == np.float64 or method == img.get_data # Set something to the output array. data[:] = 42 - with maybe_deprecated(meth_name): - get_result_changed = np.all(get_data_func() == 42) + get_result_changed = np.all(get_data_func() == 42) assert get_result_changed == (dataobj_is_data or caching != 'unchanged') if dataobj_is_data: assert data is img.dataobj @@ -387,15 +372,13 @@ def _check_array_caching(self, imaker, meth_name, caching): assert_array_equal(np.asarray(img.dataobj), 42) # Uncache has no effect img.uncache() - with maybe_deprecated(meth_name): - assert_array_equal(get_data_func(), 42) + assert_array_equal(get_data_func(), 42) else: assert not data is img.dataobj assert not np.all(np.asarray(img.dataobj) == 42) # Uncache does have an effect img.uncache() - with maybe_deprecated(meth_name): - assert not np.all(get_data_func() == 42) + assert not np.all(get_data_func() == 42) # in_memory is always true for array images, regardless of # cache state. 
img.uncache() @@ -408,8 +391,7 @@ def _check_array_caching(self, imaker, meth_name, caching): if arr_dtype not in float_types: return for float_type in float_types: - with maybe_deprecated(meth_name): - data = get_data_func(dtype=float_type) + data = get_data_func(dtype=float_type) assert (data is img.dataobj) == (arr_dtype == float_type) def validate_shape(self, imaker, params): diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 962a2433bf..706a87f10f 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -40,7 +40,7 @@ from .. import spm99analyze as spm99 from ..optpkg import optional_package from ..spatialimages import SpatialImage -from ..testing import expires +from ..testing import deprecated_to, expires from ..tmpdirs import InTemporaryDirectory from ..volumeutils import native_code, swapped_code @@ -285,7 +285,7 @@ def test_filename_save(): @expires('5.0.0') def test_guessed_image_type(): # Test whether we can guess the image type from example files - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert nils.guessed_image_type(pjoin(DATA_PATH, 'example4d.nii.gz')) == Nifti1Image assert nils.guessed_image_type(pjoin(DATA_PATH, 'nifti1.hdr')) == Nifti1Pair assert nils.guessed_image_type(pjoin(DATA_PATH, 'example_nifti2.nii.gz')) == Nifti2Image diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 4071b09f72..401ed04535 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -21,7 +21,7 @@ from ..loadsave import _signature_matches_extension, load, read_img_data from ..openers import Opener from ..optpkg import optional_package -from ..testing import expires +from ..testing import deprecated_to, expires from ..tmpdirs import InTemporaryDirectory _, have_scipy, _ = optional_package('scipy') @@ -50,14 +50,14 @@ def test_read_img_data(): fpath = pathlib.Path(fpath) img = load(fpath) data = img.get_fdata() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): data2 = read_img_data(img) assert_array_equal(data, data2) # These examples have null scaling - assert prefer=unscaled is the same dao = img.dataobj if hasattr(dao, 'slope') and hasattr(img.header, 'raw_data_from_fileobj'): assert (dao.slope, dao.inter) == (1, 0) - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(read_img_data(img, prefer='unscaled'), data) # Assert all caps filename works as well with TemporaryDirectory() as tmpdir: @@ -140,21 +140,21 @@ def test_read_img_data_nifti(): img = img_class(data, np.eye(4)) img.set_data_dtype(out_dtype) # No filemap => error - with pytest.deprecated_call(), pytest.raises(ImageFileError): + with deprecated_to('5.0.0'), pytest.raises(ImageFileError): read_img_data(img) # Make a filemap froot = f'an_image_{i}' img.file_map = img.filespec_to_file_map(froot) # Trying to read from this filemap will generate an error because # we are going to read from files that do not exist - with pytest.deprecated_call(), pytest.raises(OSError): + with deprecated_to('5.0.0'), pytest.raises(OSError): read_img_data(img) img.to_file_map() # Load - now the scaling and offset correctly applied img_fname = img.file_map['image'].filename img_back = load(img_fname) data_back = img_back.get_fdata() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(data_back, read_img_data(img_back)) # This is the same as if we loaded the image and header separately hdr_fname = 
img.file_map['header'].filename if 'header' in img.file_map else img_fname @@ -166,16 +166,16 @@ def test_read_img_data_nifti(): # Unscaled is the same as returned from raw_data_from_fileobj with open(img_fname, 'rb') as fobj: unscaled_back = hdr_back.raw_data_from_fileobj(fobj) - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(unscaled_back, read_img_data(img_back, prefer='unscaled')) # If we futz with the scaling in the header, the result changes - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(data_back, read_img_data(img_back)) has_inter = hdr_back.has_data_intercept old_slope = hdr_back['scl_slope'] old_inter = hdr_back['scl_inter'] if has_inter else 0 est_unscaled = (data_back - old_inter) / old_slope - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): actual_unscaled = read_img_data(img_back, prefer='unscaled') assert_almost_equal(est_unscaled, actual_unscaled) img_back.header['scl_slope'] = 2.1 @@ -185,10 +185,10 @@ def test_read_img_data_nifti(): else: new_inter = 0 # scaled scaling comes from new parameters in header - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert np.allclose(actual_unscaled * 2.1 + new_inter, read_img_data(img_back)) # Unscaled array didn't change - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(actual_unscaled, read_img_data(img_back, prefer='unscaled')) # Check the offset too img.header.set_data_offset(1024) @@ -200,14 +200,14 @@ def test_read_img_data_nifti(): fobj.write(b'\x00\x00') img_back = load(img_fname) data_back = img_back.get_fdata() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(data_back, read_img_data(img_back)) img_back.header.set_data_offset(1026) # Check we pick up new offset exp_offset = np.zeros((data.size,), data.dtype) + old_inter exp_offset[:-1] = np.ravel(data_back, order='F')[1:] exp_offset = np.reshape(exp_offset, shape, order='F') - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert_array_equal(exp_offset, read_img_data(img_back)) # Delete stuff that might hold onto file references del img, img_back, data_back diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index 426702fa43..b22a4ef3ec 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -1,12 +1,12 @@ import pytest from nibabel.onetime import auto_attr, setattr_on_read -from nibabel.testing import expires +from nibabel.testing import deprecated_to, expires @expires('5.0.0') def test_setattr_on_read(): - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): class MagicProp: @setattr_on_read diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 8821fac0e0..0094711e79 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -26,7 +26,7 @@ ornt2axcodes, ornt_transform, ) -from ..testing import expires +from ..testing import deprecated_to, expires IN_ARRS = [ np.eye(4), @@ -407,6 +407,6 @@ def test_inv_ornt_aff(): def test_flip_axis_deprecation(): a = np.arange(24).reshape((2, 3, 4)) axis = 1 - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): a_flipped = flip_axis(a, axis) assert_array_equal(a_flipped, np.flip(a, axis)) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 5cad23a22f..7157d5c459 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -18,7 +18,7 @@ from .. 
import load as top_load from ..imageclasses import spatial_axes_first from ..spatialimages import HeaderDataError, SpatialHeader, SpatialImage -from ..testing import bytesio_round_trip, expires, memmap_after_ufunc +from ..testing import bytesio_round_trip, deprecated_to, expires, memmap_after_ufunc from ..tmpdirs import InTemporaryDirectory @@ -368,7 +368,7 @@ def test_get_data(self): in_data = in_data_template.copy() img = img_klass(in_data, None) assert in_data is img.dataobj - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): out_data = img.get_data() assert in_data is out_data # and that uncache has no effect @@ -381,18 +381,18 @@ def test_get_data(self): rt_img = bytesio_round_trip(img) assert in_data is not rt_img.dataobj assert (rt_img.dataobj == in_data).all() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): out_data = rt_img.get_data() assert (out_data == in_data).all() assert rt_img.dataobj is not out_data # cache - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert rt_img.get_data() is out_data out_data[:] = 42 rt_img.uncache() - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert rt_img.get_data() is not out_data - with pytest.deprecated_call(): + with deprecated_to('5.0.0'): assert (rt_img.get_data() == in_data).all() def test_slicer(self): From 5e4f2f9f84c41f37233e9092395e1d2b58c3c9cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Feb 2024 15:30:27 +0000 Subject: [PATCH 521/702] Build(deps): Bump the actions-infrastructure group with 3 updates Bumps the actions-infrastructure group with 3 updates: [actions/setup-python](https://github.com/actions/setup-python), [actions/upload-artifact](https://github.com/actions/upload-artifact) and [actions/download-artifact](https://github.com/actions/download-artifact). Updates `actions/setup-python` from 4 to 5 - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v4...v5) Updates `actions/upload-artifact` from 3 to 4 - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v3...v4) Updates `actions/download-artifact` from 3 to 4 - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major dependency-group: actions-infrastructure - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major dependency-group: actions-infrastructure - dependency-name: actions/download-artifact dependency-type: direct:production update-type: version-update:semver-major dependency-group: actions-infrastructure ... 
Signed-off-by: dependabot[bot] Update .github/workflows/test.yml --- .github/workflows/test.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fc9afdc218..d9d644b871 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -44,7 +44,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3 - run: pip install --upgrade build twine @@ -54,12 +54,12 @@ jobs: - name: Build git archive run: mkdir archive && git archive -v -o archive/nibabel-archive.tgz HEAD - name: Upload sdist and wheel artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: dist path: dist/ - name: Upload git archive artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: archive path: archive/ @@ -73,17 +73,17 @@ jobs: steps: - name: Download sdist and wheel artifacts if: matrix.package != 'archive' - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: dist path: dist/ - name: Download git archive artifact if: matrix.package == 'archive' - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: archive path: archive/ - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3 - name: Display Python version @@ -147,7 +147,7 @@ jobs: submodules: recursive fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -167,9 +167,9 @@ jobs: with: files: cov.xml - name: Upload pytest test results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} + name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.dependencies }}-${{ matrix.architecture }} path: test-results.xml if: ${{ always() }} @@ -183,7 +183,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3 - name: Display Python version @@ -204,7 +204,7 @@ jobs: id-token: write if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: dist path: dist/ From c1ac82936abc448f8e174ec48154b00e43da8fc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Feb 2024 15:30:31 +0000 Subject: [PATCH 522/702] Build(deps): Bump codecov/codecov-action from 3 to 4 Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 3 to 4. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v3...v4) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Update .github/workflows/test.yml --- .github/workflows/test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d9d644b871..a6eb39734f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -162,10 +162,11 @@ jobs: run: tox c - name: Run tox run: tox -v --exit-and-dump-after 1200 - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 if: ${{ always() }} with: files: cov.xml + token: ${{ secrets.CODECOV_TOKEN }} - name: Upload pytest test results uses: actions/upload-artifact@v4 with: From ee684ebbb3afbe408f4d7abb1185a24573bbae0f Mon Sep 17 00:00:00 2001 From: manifest-rules Date: Fri, 23 Feb 2024 09:57:36 +0000 Subject: [PATCH 523/702] Backport gh-1298: Support "flat" ASCII-encoded GIFTI DataArrays TEST: Unit test for loading ASCII-encoded "flat" GIFTI data array. Currently failing RF: Make sure that ASCII-encoded DataArrays are returned with expected shape RF: Consistently apply data type, shape and index order in GIFTI data blocks TEST: Expect data arrays to be the advertised shapes --- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/parse_gifti_fast.py | 66 ++++++++--------- nibabel/gifti/tests/data/ascii_flat_data.gii | 76 ++++++++++++++++++++ nibabel/gifti/tests/test_parse_gifti_fast.py | 28 ++++++-- 4 files changed, 130 insertions(+), 42 deletions(-) create mode 100644 nibabel/gifti/tests/data/ascii_flat_data.gii diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 76bad4677a..7aba877309 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -745,7 +745,7 @@ def agg_data(self, intent_code=None): >>> triangles_2 = surf_img.agg_data('triangle') >>> triangles_3 = surf_img.agg_data(1009) # Numeric code for pointset >>> print(np.array2string(triangles)) - [0 1 2] + [[0 1 2]] >>> np.array_equal(triangles, triangles_2) True >>> np.array_equal(triangles, triangles_3) diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 7d8eacb825..ccd608324a 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -68,17 +68,21 @@ def read_data_block(darray, fname, data, mmap): if mmap is True: mmap = 'c' enclabel = gifti_encoding_codes.label[darray.encoding] - dtype = data_type_codes.type[darray.datatype] + if enclabel not in ('ASCII', 'B64BIN', 'B64GZ', 'External'): + raise GiftiParseError(f'Unknown encoding {darray.encoding}') + + # Encode the endianness in the dtype + byteorder = gifti_endian_codes.byteorder[darray.endian] + dtype = data_type_codes.dtype[darray.datatype].newbyteorder(byteorder) + + shape = tuple(darray.dims) + order = array_index_order_codes.npcode[darray.ind_ord] + + # GIFTI_ENCODING_ASCII if enclabel == 'ASCII': - # GIFTI_ENCODING_ASCII - c = StringIO(data) - da = np.loadtxt(c, dtype=dtype) - return da # independent of the endianness - elif enclabel not in ('B64BIN', 'B64GZ', 'External'): - return 0 - - # GIFTI_ENCODING_EXTBIN + return np.loadtxt(StringIO(data), dtype=dtype, ndmin=1).reshape(shape, order=order) + # We assume that the external data file is raw uncompressed binary, with # the data type/endianness/ordering specified by the other DataArray # attributes @@ -94,12 +98,13 @@ def read_data_block(darray, fname, data, mmap): newarr = None if mmap: try: - newarr = np.memmap( + return np.memmap( ext_fname, dtype=dtype, mode=mmap, offset=darray.ext_offset, - shape=tuple(darray.dims), + shape=shape, + order=order, ) # If the memmap fails, we 
ignore the error and load the data into # memory below @@ -107,13 +112,12 @@ def read_data_block(darray, fname, data, mmap): pass # mmap=False or np.memmap failed if newarr is None: - # We can replace this with a call to np.fromfile in numpy>=1.17, - # as an "offset" parameter was added in that version. - with open(ext_fname, 'rb') as f: - f.seek(darray.ext_offset) - nbytes = np.prod(darray.dims) * dtype().itemsize - buff = f.read(nbytes) - newarr = np.frombuffer(buff, dtype=dtype) + return np.fromfile( + ext_fname, + dtype=dtype, + count=np.prod(darray.dims), + offset=darray.ext_offset, + ).reshape(shape, order=order) # Numpy arrays created from bytes objects are read-only. # Neither b64decode nor decompress will return bytearrays, and there @@ -121,26 +125,14 @@ def read_data_block(darray, fname, data, mmap): # there is not a simple way to avoid making copies. # If this becomes a problem, we should write a decoding interface with # a tunable chunk size. + dec = base64.b64decode(data.encode('ascii')) + if enclabel == 'B64BIN': + buff = bytearray(dec) else: - dec = base64.b64decode(data.encode('ascii')) - if enclabel == 'B64BIN': - # GIFTI_ENCODING_B64BIN - buff = bytearray(dec) - else: - # GIFTI_ENCODING_B64GZ - buff = bytearray(zlib.decompress(dec)) - del dec - newarr = np.frombuffer(buff, dtype=dtype) - - sh = tuple(darray.dims) - if len(newarr.shape) != len(sh): - newarr = newarr.reshape(sh, order=array_index_order_codes.npcode[darray.ind_ord]) - - # check if we need to byteswap - required_byteorder = gifti_endian_codes.byteorder[darray.endian] - if required_byteorder in ('big', 'little') and required_byteorder != sys.byteorder: - newarr = newarr.byteswap() - return newarr + # GIFTI_ENCODING_B64GZ + buff = bytearray(zlib.decompress(dec)) + del dec + return np.frombuffer(buff, dtype=dtype).reshape(shape, order=order) def _str2int(in_str): diff --git a/nibabel/gifti/tests/data/ascii_flat_data.gii b/nibabel/gifti/tests/data/ascii_flat_data.gii new file mode 100644 index 0000000000..26a73fba02 --- /dev/null +++ b/nibabel/gifti/tests/data/ascii_flat_data.gii @@ -0,0 +1,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1.000000 0.000000 0.000000 0.000000 0.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000000 + + 155.17539978 135.58103943 98.30715179 140.33973694 190.0491333 73.24776459 157.3598938 196.97969055 83.65809631 171.46174622 137.43661499 78.4709549 148.54592896 97.06752777 65.96373749 123.45701599 111.46841431 66.3571167 135.30892944 202.28720093 36.38148499 178.28155518 162.59469604 37.75128937 178.11087036 115.28820038 57.17986679 142.81582642 82.82115173 31.02205276 + + + + + + + + + + + + + 6402 17923 25602 14085 25602 17923 25602 14085 4483 17923 1602 14085 4483 25603 25602 25604 25602 25603 25602 25604 6402 25603 3525 25604 1123 17922 12168 25604 12168 17922 + + diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index f08bdd1b17..f972425679 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -39,9 +39,19 @@ DATA_FILE5 = pjoin(IO_DATA_PATH, 'base64bin.gii') DATA_FILE6 = pjoin(IO_DATA_PATH, 'rh.aparc.annot.gii') DATA_FILE7 = pjoin(IO_DATA_PATH, 'external.gii') - -datafiles = [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6, DATA_FILE7] -numDA = [2, 1, 1, 1, 2, 1, 2] +DATA_FILE8 = pjoin(IO_DATA_PATH, 'ascii_flat_data.gii') + +datafiles = [ + DATA_FILE1, + 
DATA_FILE2, + DATA_FILE3, + DATA_FILE4, + DATA_FILE5, + DATA_FILE6, + DATA_FILE7, + DATA_FILE8, +] +numDA = [2, 1, 1, 1, 2, 1, 2, 2] DATA_FILE1_darr1 = np.array( [ @@ -50,7 +60,7 @@ [-17.614349, -65.401642, 21.071466], ] ) -DATA_FILE1_darr2 = np.array([0, 1, 2]) +DATA_FILE1_darr2 = np.array([[0, 1, 2]]) DATA_FILE2_darr1 = np.array( [ @@ -152,6 +162,10 @@ dtype=np.int32, ) +DATA_FILE8_darr1 = np.copy(DATA_FILE5_darr1) + +DATA_FILE8_darr2 = np.copy(DATA_FILE5_darr2) + def assert_default_types(loaded): default = loaded.__class__() @@ -448,3 +462,9 @@ def test_load_compressed(): img7 = load(fn) assert_array_almost_equal(img7.darrays[0].data, DATA_FILE7_darr1) assert_array_almost_equal(img7.darrays[1].data, DATA_FILE7_darr2) + + +def test_load_flat_ascii_data(): + img = load(DATA_FILE8) + assert_array_almost_equal(img.darrays[0].data, DATA_FILE8_darr1) + assert_array_almost_equal(img.darrays[1].data, DATA_FILE8_darr2) From 9408ae8e5bb4705a9b390729f637bcda6f3ee56d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 8 Feb 2024 08:58:57 -0500 Subject: [PATCH 524/702] Backport gh-1296: Conditionally drop derived volumes from DWI sequences DATA: Add dcm_qa_xa30 as submodule for test data TEST: Add test for Siemens TRACE volume FIX: Conditionally drop isotropic frames --- .gitmodules | 3 +++ nibabel-data/dcm_qa_xa30 | 1 + nibabel/nicom/dicomwrappers.py | 5 ++++- nibabel/nicom/tests/test_dicomwrappers.py | 12 ++++++++++++ 4 files changed, 20 insertions(+), 1 deletion(-) create mode 160000 nibabel-data/dcm_qa_xa30 diff --git a/.gitmodules b/.gitmodules index cdcef650f1..20e97c2ebb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -19,3 +19,6 @@ [submodule "nibabel-data/nitest-dicom"] path = nibabel-data/nitest-dicom url = https://github.com/effigies/nitest-dicom +[submodule "nibabel-data/dcm_qa_xa30"] + path = nibabel-data/dcm_qa_xa30 + url = https://github.com/neurolabusc/dcm_qa_xa30.git diff --git a/nibabel-data/dcm_qa_xa30 b/nibabel-data/dcm_qa_xa30 new file mode 160000 index 0000000000..89b2509218 --- /dev/null +++ b/nibabel-data/dcm_qa_xa30 @@ -0,0 +1 @@ +Subproject commit 89b2509218a6dd021c5d40ddaf2a017ac1bacafc diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 42d4b1413f..5ff4f33052 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -509,11 +509,14 @@ def image_shape(self): if hasattr(first_frame, 'get') and first_frame.get([0x18, 0x9117]): # DWI image may include derived isotropic, ADC or trace volume try: - self.frames = pydicom.Sequence( + anisotropic = pydicom.Sequence( frame for frame in self.frames if frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC' ) + # Image contains DWI volumes followed by derived images; remove derived images + if len(anisotropic) != 0: + self.frames = anisotropic except IndexError: # Sequence tag is found but missing items! 
raise WrapperError('Diffusion file missing information') diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 083357537e..5c29349362 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -35,6 +35,11 @@ DATA_FILE_EMPTY_ST = pjoin(IO_DATA_PATH, 'slicethickness_empty_string.dcm') DATA_FILE_4D_DERIVED = pjoin(get_nibabel_data(), 'nitest-dicom', '4d_multiframe_with_derived.dcm') DATA_FILE_CT = pjoin(get_nibabel_data(), 'nitest-dicom', 'siemens_ct_header_csa.dcm') +DATA_FILE_SIEMENS_TRACE = pjoin( + get_nibabel_data(), + 'dcm_qa_xa30', + 'In/20_DWI_dir80_AP/0001_1.3.12.2.1107.5.2.43.67093.2022071112140611403312307.dcm', +) # This affine from our converted image was shown to match our image spatially # with an image from SPM DICOM conversion. We checked the matching with SPM @@ -656,6 +661,13 @@ def test_data_derived_shape(self): with pytest.warns(UserWarning, match='Derived images found and removed'): assert dw.image_shape == (96, 96, 60, 33) + @dicom_test + @needs_nibabel_data('dcm_qa_xa30') + def test_data_trace(self): + # Test that a standalone trace volume is found and not dropped + dw = didw.wrapper_from_file(DATA_FILE_SIEMENS_TRACE) + assert dw.image_shape == (72, 72, 39, 1) + @dicom_test @needs_nibabel_data('nitest-dicom') def test_data_unreadable_private_headers(self): From 1df3b610e6e501d6aa000a8076ec23a21701dafe Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 26 Feb 2024 22:41:57 -0500 Subject: [PATCH 525/702] REL: 5.2.1 --- Changelog | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/Changelog b/Changelog index 10afc42df8..6892951256 100644 --- a/Changelog +++ b/Changelog @@ -25,6 +25,28 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. +5.2.1 (Monday 26 February 2024) +=============================== + +Bug-fix release in the 5.2.x series. 
+ +Enhancements +------------ +* Support "flat" ASCII-encoded GIFTI DataArrays (pr/1298) (PM, reviewed by CM) + +Bug fixes +--------- +* Tolerate missing ``git`` when reporting version info (pr/1286) (CM, reviewed by + Yuri Victorovich) +* Handle Siemens XA30 derived DWI DICOMs (pr/1296) (CM, reviewed by YOH and + Mathias Goncalves) + +Maintenance +----------- +* Add tool for generating GitHub-friendly release notes (pr/1284) (CM) +* Accommodate pytest 8 changes (pr/1297) (CM) + + 5.2.0 (Monday 11 December 2023) =============================== From 75692191fc7763feea35ee2c439a04d42d357f9b Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Tue, 27 Feb 2024 09:19:58 -0500 Subject: [PATCH 526/702] Make "Calculated shape" more "correct" (do show shape) and informative --- nibabel/nicom/dicomwrappers.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 5ff4f33052..7e8f7201a8 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -565,8 +565,11 @@ def image_shape(self): ns_unique = [len(np.unique(row)) for row in self._frame_indices.T] shape = (rows, cols) + tuple(ns_unique) n_vols = np.prod(shape[3:]) - if n_frames != n_vols * shape[2]: - raise WrapperError('Calculated shape does not match number of frames.') + n_frames_calc = n_vols * shape[2] + if n_frames != n_frames_calc: + raise WrapperError( + f'Calculated # of frames ({n_frames_calc}={n_vols}*{shape[2]}) of shape {shape} does not ' + f'match NumberOfFrames {n_frames}.') return tuple(shape) @one_time From d063b95a83bc2fba49d083a96235e60b3a0035c1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 28 Feb 2024 09:40:14 -0500 Subject: [PATCH 527/702] STY: blue/flake8 --- nibabel/nicom/dicomwrappers.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 7e8f7201a8..a5ea550d87 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -568,8 +568,9 @@ def image_shape(self): n_frames_calc = n_vols * shape[2] if n_frames != n_frames_calc: raise WrapperError( - f'Calculated # of frames ({n_frames_calc}={n_vols}*{shape[2]}) of shape {shape} does not ' - f'match NumberOfFrames {n_frames}.') + f'Calculated # of frames ({n_frames_calc}={n_vols}*{shape[2]}) ' + f'of shape {shape} does not match NumberOfFrames {n_frames}.' 
+ ) return tuple(shape) @one_time From e4facc17fbebeb92fa6fed600b9a349c6e373ee3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 4 Mar 2024 09:10:12 -0500 Subject: [PATCH 528/702] PIN: Temporarily pin pytest<8.1, pending scientific-python/pytest-doctestplus#239 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e92c465e0d..3cd81f93e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,7 +67,7 @@ doc = [ "tomli; python_version < '3.11'", ] test = [ - "pytest", + "pytest<8.1", # relax once pytest-doctestplus releases 1.2.0 "pytest-doctestplus", "pytest-cov", "pytest-httpserver", From 2bad8cce331976af3e8b42cecaed76bb075ee8b3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 5 Mar 2024 08:16:05 -0500 Subject: [PATCH 529/702] FIX: Use np.asarray instead of np.array(..., copy=False) --- nibabel/affines.py | 2 +- nibabel/casting.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/affines.py b/nibabel/affines.py index 05fdd7bb58..1478fd2dca 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -365,7 +365,7 @@ def rescale_affine(affine, shape, zooms, new_shape=None): A new affine transform with the specified voxel sizes """ - shape = np.array(shape, copy=False) + shape = np.asarray(shape) new_shape = np.array(new_shape if new_shape is not None else shape) s = voxel_sizes(affine) diff --git a/nibabel/casting.py b/nibabel/casting.py index f3e04f30f4..09015135f2 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -611,7 +611,7 @@ def int_abs(arr): >>> int_abs(np.array([-128, 127], dtype=np.float32)) array([128., 127.], dtype=float32) """ - arr = np.array(arr, copy=False) + arr = np.asarray(arr) dt = arr.dtype if dt.kind == 'u': return arr From 1d984adf83f41f328324af8eb917fec68e6dfbd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Mon, 11 Mar 2024 20:10:35 -0400 Subject: [PATCH 530/702] DOC: Update affiliation of jhlegarreta Update affiliation of jhlegarreta. --- .zenodo.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index 6cadd84a7a..a30467ebe0 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -270,7 +270,7 @@ "orcid": "0000-0003-1076-5122" }, { - "affiliation": "Universit\u00e9 de Sherbrooke", + "affiliation": "Brigham and Women's Hospital, Mass General Brigham/Harvard Medical School", "name": "Legarreta, Jon Haitz", "orcid": "0000-0002-9661-1396" }, From f23ca14310724897fb24f8061eeee2dc382cf2cc Mon Sep 17 00:00:00 2001 From: Joshua Newton Date: Fri, 22 Mar 2024 17:48:06 -0400 Subject: [PATCH 531/702] `casting.py`: Filter WSL1 + np.longdouble warning This commit filters the following warning: > UserWarning: Signature b'\x00\xd0\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf\x00\x00\x00\x00\x00\x00' for > does not match any known type: falling back to type probe function. > This warnings [sic] indicates broken support for the dtype! > machar = _get_machar(dtype) To ensure that this warning is only filtered on WSL1, we try to detect WSL by checking for a WSL-specific string from the uname, which appears to be endorsed by WSL devs. (https://github.com/microsoft/WSL/issues/4555#issuecomment-700315063) I also tried checking the `WSL_INTEROP` and `WSL_DISTRO_NAME` environment variables as suggested in the above linked issues, but I preferred reusing the `platform` module that was already imported inside `casting.py`. 
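For illustration, a minimal sketch of the uname-based check described above (hypothetical helper name; the patch below inlines the check rather than defining a function):

    from platform import uname

    def _running_under_wsl1() -> bool:
        # WSL1 kernels report a release string ending in '-Microsoft'
        # (e.g. '4.4.0-19041-Microsoft'); WSL2 kernels use
        # '-microsoft-standard', so this check does not match them.
        return uname().release.endswith('-Microsoft')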
There is perhaps a more thorough approach where we collect all raised warnings, test the collected warnings, etc. but I didn't want to overcomplicate things. --- nibabel/casting.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 09015135f2..ec86089576 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -6,7 +6,7 @@ from __future__ import annotations import warnings -from platform import machine, processor +from platform import machine, processor, uname import numpy as np @@ -274,7 +274,15 @@ def type_info(np_type): nexp=None, width=width, ) - info = np.finfo(dt) + # Mitigate warning from WSL1 when checking `np.longdouble` (#1309) + # src for '-Microsoft': https://github.com/microsoft/WSL/issues/4555#issuecomment-536862561 + with warnings.catch_warnings(): + if uname().release.endswith('-Microsoft'): + warnings.filterwarnings( + action='ignore', category=UserWarning, message='Signature.*numpy.longdouble' + ) + info = np.finfo(dt) + # Trust the standard IEEE types nmant, nexp = info.nmant, info.nexp ret = dict( From 50dd737089d46adc1bd5c0e7f97d137c10cb1166 Mon Sep 17 00:00:00 2001 From: Joshua Newton Date: Sat, 23 Mar 2024 13:41:02 -0400 Subject: [PATCH 532/702] `casting.py`: Remove `uname` check for WSL1 --- nibabel/casting.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index ec86089576..77da57e406 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -6,7 +6,7 @@ from __future__ import annotations import warnings -from platform import machine, processor, uname +from platform import machine, processor import numpy as np @@ -275,12 +275,10 @@ def type_info(np_type): width=width, ) # Mitigate warning from WSL1 when checking `np.longdouble` (#1309) - # src for '-Microsoft': https://github.com/microsoft/WSL/issues/4555#issuecomment-536862561 with warnings.catch_warnings(): - if uname().release.endswith('-Microsoft'): - warnings.filterwarnings( - action='ignore', category=UserWarning, message='Signature.*numpy.longdouble' - ) + warnings.filterwarnings( + action='ignore', category=UserWarning, message='Signature.*numpy.longdouble' + ) info = np.finfo(dt) # Trust the standard IEEE types From 2978ee8ee45cf8c935b91a5a2e3268406f8f24f6 Mon Sep 17 00:00:00 2001 From: Joshua Newton Date: Sat, 23 Mar 2024 18:58:47 -0400 Subject: [PATCH 533/702] `.zenodo.json`: Add Joshua Newton --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index a30467ebe0..553aba0548 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -391,6 +391,11 @@ }, { "name": "freec84" + }, + { + "affiliation": "Polytechnique Montréal, Montréal, CA", + "name": "Newton, Joshua", + "orcid": "0009-0005-6963-3812" } ], "keywords": [ From 733c0f36af71808185245617a156b3e7b4bd26a2 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 26 Dec 2023 14:00:58 +0100 Subject: [PATCH 534/702] =?UTF-8?q?MNT:=20blue/isort/flake8=20=E2=86=92=20?= =?UTF-8?q?ruff?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .pre-commit-config.yaml | 19 +++++++------------ pyproject.toml | 39 +++++++++++++++++++++++++-------------- tox.ini | 17 ++++++----------- 3 files changed, 38 insertions(+), 37 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2b620a6de3..ef2d891fbd 100644 --- a/.pre-commit-config.yaml +++ 
b/.pre-commit-config.yaml @@ -12,19 +12,14 @@ repos: - id: check-case-conflict - id: check-merge-conflict - id: check-vcs-permalinks - - repo: https://github.com/grantjenks/blue - rev: v0.9.1 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.1.9 hooks: - - id: blue - - repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - - repo: https://github.com/pycqa/flake8 - rev: 6.1.0 - hooks: - - id: flake8 - exclude: "^(doc|nisext|tools)/" + - id: ruff + args: [--fix, --show-fix, --exit-non-zero-on-fix] + exclude: = ["doc", "tools"] + - id: ruff-format + exclude: = ["doc", "tools"] - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.5.1 hooks: diff --git a/pyproject.toml b/pyproject.toml index 3cd81f93e5..515c35850b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,21 +109,32 @@ __version__ = version = {version!r} __version_tuple__ = version_tuple = {version_tuple!r} ''' -[tool.blue] -line_length = 99 -target-version = ["py38"] -force-exclude = """ -( - _version.py - | nibabel/externals/ - | versioneer.py -) -""" +[tool.ruff] +line-length = 99 +exclude = ["doc", "nibabel/externals", "tools", "version.py", "versioneer.py"] -[tool.isort] -profile = "black" -line_length = 99 -extend_skip = ["_version.py", "externals"] +[tool.ruff.lint] +select = ["F", "I", "Q"] +ignore = [ + # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules + "W191", + "E111", + "E114", + "E117", + "D206", + "D300", + "Q000", + "Q001", + "Q002", + "Q003", + "COM812", + "COM819", + "ISC001", + "ISC002", +] + +[tool.ruff.format] +quote-style = "single" [tool.mypy] python_version = "3.11" diff --git a/tox.ini b/tox.ini index cc2b263cb1..4e9b220ce8 100644 --- a/tox.ini +++ b/tox.ini @@ -139,26 +139,21 @@ commands = description = Check our style guide labels = check deps = - flake8 - blue - # Broken extras, remove when fix is released - isort[colors]!=5.13.1 + ruff>=0.1.9 skip_install = true commands = - blue --check --diff --color nibabel - isort --check --diff --color nibabel - flake8 nibabel + ruff --diff nibabel + ruff format --diff nibabel [testenv:style-fix] description = Auto-apply style guide to the extent possible labels = pre-release deps = - blue - isort + ruff skip_install = true commands = - blue nibabel - isort nibabel + ruff --fix nibabel + ruff format nibabel [testenv:spellcheck] description = Check spelling From 39429f9708ede298088c1a9206fca83ef2b73b49 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 26 Dec 2023 15:17:14 +0100 Subject: [PATCH 535/702] MNT: run `ruff --fix` and `ruff format` Also fix remaining issues manually.
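For context, the F401 suppressions applied in the diffs below take two forms: ruff's file-level directive (used for re-export modules such as nibabel/__init__.py and nibabel/cifti2/__init__.py) and the per-line comment (used in doc/source/conf.py). A sketch with stand-in stdlib imports:

    # ruff: noqa: F401
    # File-level: keep every import in this module even though nothing
    # references it locally (the names are re-exported for package users).
    import os
    import sys

    # Per-line alternative, suppressing a single import only:
    from collections import OrderedDict  # noqa: F401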
--- doc/source/conf.py | 4 ++-- doc/tools/apigen.py | 1 - doc/tools/build_modref_templates.py | 2 +- nibabel/__init__.py | 6 +++--- nibabel/benchmarks/bench_arrayproxy_slicing.py | 3 --- nibabel/cifti2/__init__.py | 1 + nibabel/cifti2/tests/test_cifti2.py | 2 +- nibabel/cifti2/tests/test_cifti2io_header.py | 3 +-- nibabel/cmdline/diff.py | 3 --- nibabel/cmdline/tests/test_convert.py | 2 +- nibabel/cmdline/tests/test_roi.py | 3 +-- nibabel/cmdline/tests/test_stats.py | 3 --- nibabel/cmdline/tests/test_utils.py | 14 ++++++++++++-- nibabel/conftest.py | 2 +- nibabel/dft.py | 4 ++-- nibabel/ecat.py | 1 - nibabel/freesurfer/__init__.py | 2 ++ nibabel/freesurfer/tests/test_mghformat.py | 1 + nibabel/gifti/__init__.py | 2 ++ nibabel/gifti/tests/test_gifti.py | 13 ++++++------- nibabel/gifti/tests/test_parse_gifti_fast.py | 4 ++-- nibabel/info.py | 2 +- nibabel/nicom/tests/test_ascconv.py | 1 - nibabel/nicom/tests/test_csareader.py | 1 - nibabel/nicom/tests/test_dicomwrappers.py | 2 +- nibabel/openers.py | 2 +- nibabel/streamlines/__init__.py | 2 ++ nibabel/streamlines/tck.py | 1 - nibabel/streamlines/tests/test_array_sequence.py | 3 +-- nibabel/streamlines/tests/test_streamlines.py | 1 - nibabel/streamlines/tests/test_tck.py | 3 +-- nibabel/streamlines/tests/test_tractogram.py | 2 -- nibabel/streamlines/tests/test_tractogram_file.py | 1 - nibabel/streamlines/tests/test_trk.py | 2 +- nibabel/streamlines/trk.py | 5 +---- nibabel/testing/__init__.py | 3 +++ nibabel/tests/nibabel_data.py | 3 +-- nibabel/tests/scriptrunner.py | 3 +-- nibabel/tests/test_affines.py | 2 +- nibabel/tests/test_arraywriters.py | 6 +++--- nibabel/tests/test_brikhead.py | 2 +- nibabel/tests/test_data.py | 2 +- nibabel/tests/test_ecat.py | 1 - nibabel/tests/test_ecat_data.py | 2 +- nibabel/tests/test_floating.py | 3 --- nibabel/tests/test_funcs.py | 1 - nibabel/tests/test_image_load_save.py | 3 +-- nibabel/tests/test_image_types.py | 1 - nibabel/tests/test_imageclasses.py | 4 +--- nibabel/tests/test_init.py | 1 + nibabel/tests/test_minc1.py | 6 +----- nibabel/tests/test_minc2.py | 2 +- nibabel/tests/test_nibabel_data.py | 3 +-- nibabel/tests/test_nifti1.py | 1 - nibabel/tests/test_nifti2.py | 2 +- nibabel/tests/test_openers.py | 1 - nibabel/tests/test_orientations.py | 2 -- nibabel/tests/test_parrec.py | 5 ++--- nibabel/tests/test_parrec_data.py | 4 +--- nibabel/tests/test_pkg_info.py | 2 +- nibabel/tests/test_pointset.py | 3 --- nibabel/tests/test_quaternions.py | 7 ------- nibabel/tests/test_removalschedule.py | 1 - nibabel/tests/test_scripts.py | 5 ++--- nibabel/tests/test_spatialimages.py | 3 +-- nibabel/tests/test_testing.py | 4 ++-- nibabel/tests/test_wrapstruct.py | 9 --------- tools/make_tarball.py | 2 +- tools/markdown_release_notes.py | 2 +- tools/mpkg_wrapper.py | 2 +- 70 files changed, 79 insertions(+), 128 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 175c6340bd..e8999b7d2b 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -30,11 +30,11 @@ # Check for external Sphinx extensions we depend on try: - import numpydoc + import numpydoc # noqa: F401 except ImportError: raise RuntimeError('Need to install "numpydoc" package for doc build') try: - import texext + import texext # noqa: F401 except ImportError: raise RuntimeError('Need to install "texext" package for doc build') diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py index 3167362643..a1279a3e98 100644 --- a/doc/tools/apigen.py +++ b/doc/tools/apigen.py @@ -405,7 +405,6 @@ def discover_modules(self): def 
write_modules_api(self, modules, outdir): # upper-level modules - main_module = modules[0].split('.')[0] ulms = [ '.'.join(m.split('.')[:2]) if m.count('.') >= 1 else m.split('.')[0] for m in modules ] diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py index 11eae99741..0e82cf6bf8 100755 --- a/doc/tools/build_modref_templates.py +++ b/doc/tools/build_modref_templates.py @@ -38,7 +38,7 @@ def abort(error): try: __import__(package) - except ImportError as e: + except ImportError: abort('Can not import ' + package) module = sys.modules[package] diff --git a/nibabel/__init__.py b/nibabel/__init__.py index db427435ae..1cb7abf53f 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -7,6 +7,8 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# ruff: noqa: F401 + import os from .info import long_description as __doc__ @@ -39,12 +41,10 @@ # module imports from . import analyze as ana -from . import ecat, imagestats, mriutils +from . import ecat, imagestats, mriutils, orientations, streamlines, viewers from . import nifti1 as ni1 -from . import orientations from . import spm2analyze as spm2 from . import spm99analyze as spm99 -from . import streamlines, viewers # isort: split diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 305c5215e4..3444cb8d8f 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -56,7 +56,6 @@ def bench_arrayproxy_slicing(): - print_git_title('\nArrayProxy gzip slicing') # each test is a tuple containing @@ -100,7 +99,6 @@ def fmt_sliceobj(sliceobj): return f"[{', '.join(slcstr)}]" with InTemporaryDirectory(): - print(f'Generating test data... ({int(round(np.prod(SHAPE) * 4 / 1048576.0))} MB)') data = np.array(np.random.random(SHAPE), dtype=np.float32) @@ -128,7 +126,6 @@ def fmt_sliceobj(sliceobj): seeds = [np.random.randint(0, 2**32) for s in SLICEOBJS] for ti, test in enumerate(tests): - label = get_test_label(test) have_igzip, keep_open, sliceobj = test seed = seeds[SLICEOBJS.index(sliceobj)] diff --git a/nibabel/cifti2/__init__.py b/nibabel/cifti2/__init__.py index 9c6805f818..4a5cad7675 100644 --- a/nibabel/cifti2/__init__.py +++ b/nibabel/cifti2/__init__.py @@ -6,6 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# ruff: noqa: F401 """CIFTI-2 format IO .. 
currentmodule:: nibabel.cifti2 diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index bf287b8e03..d7fd0a0eda 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -7,7 +7,7 @@ import pytest from nibabel import cifti2 as ci -from nibabel.cifti2.cifti2 import Cifti2HeaderError, _float_01, _value_if_klass +from nibabel.cifti2.cifti2 import _float_01, _value_if_klass from nibabel.nifti2 import Nifti2Header from nibabel.tests.test_dataobj_images import TestDataobjAPI as _TDA from nibabel.tests.test_image_api import DtypeOverrideMixin, SerializeMixin diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 8d393686dd..92078a26d7 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -import io from os.path import dirname from os.path import join as pjoin @@ -38,7 +37,7 @@ def test_space_separated_affine(): - img = ci.Cifti2Image.from_filename(pjoin(NIBABEL_TEST_DATA, 'row_major.dconn.nii')) + _ = ci.Cifti2Image.from_filename(pjoin(NIBABEL_TEST_DATA, 'row_major.dconn.nii')) def test_read_nifti2(): diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index b409c7205d..d20a105e76 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -231,7 +231,6 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): diffs1 = [None] * (i + 1) for j, d2 in enumerate(data[i + 1 :], i + 1): - if d1.shape == d2.shape: abs_diff = np.abs(d1 - d2) mean_abs = (np.abs(d1) + np.abs(d2)) * 0.5 @@ -255,7 +254,6 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): max_rel_diff = 0 if np.any(candidates): - diff_rec = OrderedDict() # so that abs goes before relative diff_rec['abs'] = max_abs_diff.astype(dtype) @@ -268,7 +266,6 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): diffs1.append({'CMP': 'incompat'}) if any(diffs1): - diffs['DATA(diff %d:)' % (i + 1)] = diffs1 return diffs diff --git a/nibabel/cmdline/tests/test_convert.py b/nibabel/cmdline/tests/test_convert.py index 4605bc810d..021e6ea8ef 100644 --- a/nibabel/cmdline/tests/test_convert.py +++ b/nibabel/cmdline/tests/test_convert.py @@ -119,7 +119,7 @@ def test_convert_imgtype(tmp_path, ext, img_class): def test_convert_nifti_int_fail(tmp_path): infile = get_test_data(fname='anatomical.nii') - outfile = tmp_path / f'output.nii' + outfile = tmp_path / 'output.nii' orig = nib.load(infile) assert not outfile.exists() diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index ea3852b4da..d2baa80eeb 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -1,5 +1,4 @@ import os -import unittest from unittest import mock import numpy as np @@ -140,7 +139,7 @@ def test_entrypoint(capsys): # Check that we handle missing args as expected with mock.patch('sys.argv', ['nib-roi', '--help']): try: - retval = main() + main() except SystemExit: pass else: diff --git a/nibabel/cmdline/tests/test_stats.py b/nibabel/cmdline/tests/test_stats.py index 576a408bce..905114e31b 100644 --- a/nibabel/cmdline/tests/test_stats.py +++ b/nibabel/cmdline/tests/test_stats.py @@ -8,9 +8,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -import sys -from io import StringIO - import numpy as np from nibabel import Nifti1Image diff --git 
a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 8143d648d9..0efb5ee0b9 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -12,8 +12,18 @@ import pytest import nibabel as nib -from nibabel.cmdline.diff import * -from nibabel.cmdline.utils import * +from nibabel.cmdline.diff import ( + display_diff, + get_data_diff, + get_data_hash_diff, + get_headers_diff, + main, +) +from nibabel.cmdline.utils import ( + ap, + safe_get, + table2string, +) from nibabel.testing import data_path diff --git a/nibabel/conftest.py b/nibabel/conftest.py index 5eba256fa5..a4f8b6de90 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -5,7 +5,7 @@ # Ignore warning requesting help with nicom with pytest.warns(UserWarning): - import nibabel.nicom + import nibabel.nicom # noqa: F401 @pytest.fixture(scope='session', autouse=True) diff --git a/nibabel/dft.py b/nibabel/dft.py index ee34595b3f..aeb8accbb5 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -161,7 +161,7 @@ def as_nifti(self): data = numpy.ndarray( (len(self.storage_instances), self.rows, self.columns), dtype=numpy.int16 ) - for (i, si) in enumerate(self.storage_instances): + for i, si in enumerate(self.storage_instances): if i + 1 != si.instance_number: raise InstanceStackError(self, i, si) logger.info('reading %d/%d' % (i + 1, len(self.storage_instances))) @@ -243,7 +243,7 @@ def dicom(self): def _get_subdirs(base_dir, files_dict=None, followlinks=False): dirs = [] - for (dirpath, dirnames, filenames) in os.walk(base_dir, followlinks=followlinks): + for dirpath, dirnames, filenames in os.walk(base_dir, followlinks=followlinks): abs_dir = os.path.realpath(dirpath) if abs_dir in dirs: raise CachingError(f'link cycle detected under {base_dir}') diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 1db902d10a..85de9184b5 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -513,7 +513,6 @@ def read_subheaders(fileobj, mlist, endianness): class EcatSubHeader: - _subhdrdtype = subhdr_dtype _data_type_codes = data_type_codes diff --git a/nibabel/freesurfer/__init__.py b/nibabel/freesurfer/__init__.py index 806d19a272..48922285c9 100644 --- a/nibabel/freesurfer/__init__.py +++ b/nibabel/freesurfer/__init__.py @@ -1,6 +1,8 @@ """Reading functions for freesurfer files """ +# ruff: noqa: F401 + from .io import ( read_annot, read_geometry, diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 189f1a9dd7..d69587811b 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -460,6 +460,7 @@ def test_as_byteswapped(self): for endianness in (None,) + LITTLE_CODES: with pytest.raises(ValueError): hdr.as_byteswapped(endianness) + # Note that contents is not rechecked on swap / copy class DC(self.header_class): def check_fix(self, *args, **kwargs): diff --git a/nibabel/gifti/__init__.py b/nibabel/gifti/__init__.py index f54a1d2e54..d2a1e2da65 100644 --- a/nibabel/gifti/__init__.py +++ b/nibabel/gifti/__init__.py @@ -16,6 +16,8 @@ gifti """ +# ruff: noqa: F401 + from .gifti import ( GiftiCoordSystem, GiftiDataArray, diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 5cc2756c60..7e4c223971 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -2,12 +2,11 @@ """ import itertools import sys -import warnings from io import BytesIO import numpy as np import pytest -from numpy.testing import assert_array_almost_equal, 
assert_array_equal +from numpy.testing import assert_array_equal from nibabel.tmpdirs import InTemporaryDirectory @@ -329,7 +328,7 @@ def test_metadata_list_interface(): assert len(md) == 0 # Extension adds multiple keys - with pytest.warns(DeprecationWarning) as w: + with pytest.warns(DeprecationWarning) as _: foobar = GiftiNVPairs('foo', 'bar') mdlist.extend([nvpair, foobar]) assert len(mdlist) == 2 @@ -337,7 +336,7 @@ def test_metadata_list_interface(): assert md == {'key': 'value', 'foo': 'bar'} # Insertion updates list order, though we don't attempt to preserve it in the dict - with pytest.warns(DeprecationWarning) as w: + with pytest.warns(DeprecationWarning) as _: lastone = GiftiNVPairs('last', 'one') mdlist.insert(1, lastone) assert len(mdlist) == 3 @@ -360,14 +359,14 @@ def test_metadata_list_interface(): mypair.value = 'strings' assert 'completelynew' not in md assert md == {'foo': 'bar', 'last': 'one'} - # Check popping from the end (lastone inserted before foobar) - lastpair = mdlist.pop() + # Check popping from the end (last one inserted before foobar) + _ = mdlist.pop() assert len(mdlist) == 1 assert len(md) == 1 assert md == {'last': 'one'} # And let's remove an old pair with a new object - with pytest.warns(DeprecationWarning) as w: + with pytest.warns(DeprecationWarning) as _: lastoneagain = GiftiNVPairs('last', 'one') mdlist.remove(lastoneagain) assert len(mdlist) == 0 diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index f972425679..17258fbd30 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -447,13 +447,13 @@ def test_external_file_failure_cases(): shutil.copy(DATA_FILE7, '.') filename = pjoin(tmpdir, basename(DATA_FILE7)) with pytest.raises(GiftiParseError): - img = load(filename) + _ = load(filename) # load from in-memory xml string (parser requires it as bytes) with open(DATA_FILE7, 'rb') as f: xmldata = f.read() parser = GiftiImageParser() with pytest.raises(GiftiParseError): - img = parser.parse(xmldata) + _ = parser.parse(xmldata) def test_load_compressed(): diff --git a/nibabel/info.py b/nibabel/info.py index a608932fa8..d7873de211 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -108,4 +108,4 @@ .. _Digital Object Identifier: https://en.wikipedia.org/wiki/Digital_object_identifier .. _zenodo: https://zenodo.org -""" # noqa: E501 +""" # noqa: E501 diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index cd27bc3192..cf40298c56 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -5,7 +5,6 @@ from os.path import dirname from os.path import join as pjoin -import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal from .. 
import ascconv diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 0fc559c7fc..ddb46a942a 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -1,7 +1,6 @@ """Testing Siemens CSA header reader """ import gzip -import sys from copy import deepcopy from os.path import join as pjoin diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 5c29349362..fa2dfc07c6 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -631,7 +631,7 @@ def test_image_position(self): def test_affine(self): # Make sure we find orientation/position/spacing info dw = didw.wrapper_from_file(DATA_FILE_4D) - aff = dw.affine + _ = dw.affine @dicom_test @pytest.mark.xfail(reason='Not packaged in install', raises=FileNotFoundError) diff --git a/nibabel/openers.py b/nibabel/openers.py index 90c7774d12..d69412fb85 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -86,7 +86,6 @@ def _gzip_open( mtime: int = 0, keep_open: bool = False, ) -> gzip.GzipFile: - if not HAVE_INDEXED_GZIP or mode != 'rb': gzip_file = DeterministicGzipFile(filename, mode, compresslevel, mtime=mtime) @@ -129,6 +128,7 @@ class Opener: passed to opening method when `fileish` is str. Change of defaults as for \*args """ + gz_def = (_gzip_open, ('mode', 'compresslevel', 'mtime', 'keep_open')) bz2_def = (BZ2File, ('mode', 'buffering', 'compresslevel')) zstd_def = (_zstd_open, ('mode', 'level_or_option', 'zstd_dict')) diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index f99f80e4e4..f3cbd2da59 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -1,5 +1,7 @@ """Multiformat-capable streamline format read / write interface """ +# ruff: noqa: F401 + import os import warnings diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 43df2f87e0..358c579362 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -309,7 +309,6 @@ def _read_header(cls, fileobj): offset_data = 0 with Opener(fileobj) as f: - # Record start position start_position = f.tell() diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index 0c8557fe50..a06b2c45d9 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -1,6 +1,5 @@ import itertools import os -import sys import tempfile import unittest @@ -220,7 +219,7 @@ def test_arraysequence_extend(self): seq.extend(data) # Extend after extracting some slice - working_slice = seq[:2] + _ = seq[:2] seq.extend(ArraySequence(new_data)) def test_arraysequence_getitem(self): diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index 300397b2b4..f0bd9c7c49 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -1,5 +1,4 @@ import os -import tempfile import unittest import warnings from io import BytesIO diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index 3df7dd4f2d..6b4c163ed6 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -8,7 +8,6 @@ from numpy.testing import assert_array_equal from ...testing import data_path, error_warnings -from .. 
import tck as tck_module from ..array_sequence import ArraySequence from ..tck import TckFile from ..tractogram import Tractogram @@ -138,7 +137,7 @@ def test_load_file_with_wrong_information(self): # Simulate a TCK file with no `file` field. new_tck_file = tck_file.replace(b'\nfile: . 67', b'') - with pytest.warns(HeaderWarning, match="Missing 'file'") as w: + with pytest.warns(HeaderWarning, match="Missing 'file'") as _: tck = TckFile.load(BytesIO(new_tck_file)) assert_array_equal(tck.header['file'], '. 56') diff --git a/nibabel/streamlines/tests/test_tractogram.py b/nibabel/streamlines/tests/test_tractogram.py index 09e3b910be..9159688548 100644 --- a/nibabel/streamlines/tests/test_tractogram.py +++ b/nibabel/streamlines/tests/test_tractogram.py @@ -1,6 +1,5 @@ import copy import operator -import sys import unittest import warnings from collections import defaultdict @@ -172,7 +171,6 @@ def setup_module(): def check_tractogram_item(tractogram_item, streamline, data_for_streamline={}, data_for_points={}): - assert_array_equal(tractogram_item.streamline, streamline) assert len(tractogram_item.data_for_streamline) == len(data_for_streamline) diff --git a/nibabel/streamlines/tests/test_tractogram_file.py b/nibabel/streamlines/tests/test_tractogram_file.py index 53a7fb662b..71e2326ecf 100644 --- a/nibabel/streamlines/tests/test_tractogram_file.py +++ b/nibabel/streamlines/tests/test_tractogram_file.py @@ -8,7 +8,6 @@ def test_subclassing_tractogram_file(): - # Missing 'save' method class DummyTractogramFile(TractogramFile): @classmethod diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py index b8ff43620b..749bf3ed30 100644 --- a/nibabel/streamlines/tests/test_trk.py +++ b/nibabel/streamlines/tests/test_trk.py @@ -149,7 +149,7 @@ def test_load_file_with_wrong_information(self): # Simulate a TRK where `vox_to_ras` is invalid. trk_struct, trk_bytes = self.trk_with_bytes() trk_struct[Field.VOXEL_TO_RASMM] = np.diag([0, 0, 0, 1]) - with clear_and_catch_warnings(record=True, modules=[trk_module]) as w: + with clear_and_catch_warnings(record=True, modules=[trk_module]) as _: with pytest.raises(HeaderError): TrkFile.load(BytesIO(trk_bytes)) diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 966b133d1f..0b11f5684e 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -366,7 +366,6 @@ def _read(): tractogram = LazyTractogram.from_data_func(_read) else: - # Speed up loading by guessing a suitable buffer size. 
with Opener(fileobj) as f: old_file_position = f.tell() @@ -773,6 +772,4 @@ def __str__(self): swap_yz: {swap_yz} swap_zx: {swap_zx} n_count: {NB_STREAMLINES} -hdr_size: {hdr_size}""".format( - **vars - ) +hdr_size: {hdr_size}""".format(**vars) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 21ecadf841..a3e98e064b 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -7,6 +7,9 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for testing""" + +# ruff: noqa: F401 + from __future__ import annotations import os diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 8d4652d79f..1f89c9c1a1 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -3,9 +3,8 @@ import unittest from os import environ, listdir -from os.path import dirname, exists, isdir +from os.path import dirname, exists, isdir, realpath from os.path import join as pjoin -from os.path import realpath def get_nibabel_data(): diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 1ec2fcb486..1e8b1fdda2 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -14,9 +14,8 @@ """ import os import sys -from os.path import dirname, isdir, isfile +from os.path import dirname, isdir, isfile, pathsep, realpath from os.path import join as pjoin -from os.path import pathsep, realpath from subprocess import PIPE, Popen MY_PACKAGE = __package__ diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 28f405e566..1d7ef1e6bf 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -225,7 +225,7 @@ def test_rescale_affine(): orig_shape = rng.randint(low=20, high=512, size=(3,)) orig_aff = np.eye(4) orig_aff[:3, :] = rng.normal(size=(3, 4)) - orig_zooms = voxel_sizes(orig_aff) + orig_zooms = voxel_sizes(orig_aff) # noqa: F841 orig_axcodes = aff2axcodes(orig_aff) orig_centroid = apply_affine(orig_aff, (orig_shape - 1) // 2) diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 89e7ac6755..2fc9c32358 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -276,7 +276,7 @@ def test_slope_inter_castable(): for out_dtt in NUMERIC_TYPES: for klass in (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter): arr = np.zeros((5,), dtype=in_dtt) - aw = klass(arr, out_dtt) # no error + _ = klass(arr, out_dtt) # no error # Test special case of none finite # This raises error for ArrayWriter, but not for the others arr = np.array([np.inf, np.nan, -np.inf]) @@ -285,8 +285,8 @@ def test_slope_inter_castable(): in_arr = arr.astype(in_dtt) with pytest.raises(WriterError): ArrayWriter(in_arr, out_dtt) - aw = SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error - aw = SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error + _ = SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error + _ = SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error for in_dtt, out_dtt, arr, slope_only, slope_inter, neither in ( (np.float32, np.float32, 1, True, True, True), (np.float64, np.float32, 1, True, True, True), diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index 5bf6e79cb9..31e0d0d47c 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -13,7 +13,7 @@ import pytest from numpy.testing import assert_array_equal -from .. import Nifti1Image, brikhead, load +from .. 
import Nifti1Image, brikhead from ..testing import assert_data_similar, data_path from .test_fileslice import slicer_samples diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index abcb3caaf2..3ccb4963ca 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -22,7 +22,7 @@ get_data_path, make_datasource, ) -from .test_environment import DATA_KEY, USER_KEY, with_environment +from .test_environment import DATA_KEY, USER_KEY, with_environment # noqa: F401 @pytest.fixture diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 6a076cbc38..702913e14d 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import os -import warnings from pathlib import Path from unittest import TestCase diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index b7dbe4750a..23485ae92b 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -13,7 +13,7 @@ from os.path import join as pjoin import numpy as np -from numpy.testing import assert_almost_equal, assert_array_equal +from numpy.testing import assert_almost_equal from ..ecat import load from .nibabel_data import get_nibabel_data, needs_nibabel_data diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 3e6e7f426b..c2ccd44039 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -1,10 +1,8 @@ """Test floating point deconstructions and floor methods """ import sys -from contextlib import nullcontext import numpy as np -import pytest from packaging.version import Version from ..casting import ( @@ -13,7 +11,6 @@ _check_nmant, ceil_exact, floor_exact, - floor_log2, have_binary128, longdouble_precision_improved, ok_floats, diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 10f6e90813..5e59bc63b6 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -58,7 +58,6 @@ def test_concat(): # Loop over every possible axis, including None (explicit and implied) for axis in list(range(-(dim - 2), (dim - 1))) + [None, '__default__']: - # Allow testing default vs. 
passing explicit param if axis == '__default__': np_concat_kwargs = dict(axis=-1) diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 706a87f10f..4e787f0d71 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -24,7 +24,6 @@ MGHImage, Minc1Image, Minc2Image, - Nifti1Header, Nifti1Image, Nifti1Pair, Nifti2Image, @@ -131,7 +130,7 @@ def test_save_load(): affine[:3, 3] = [3, 2, 1] img = ni1.Nifti1Image(data, affine) img.set_data_dtype(npt) - with InTemporaryDirectory() as pth: + with InTemporaryDirectory() as _: nifn = 'an_image.nii' sifn = 'another_image.img' ni1.save(img, nifn) diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index da2f93e21f..bc50c8417e 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -88,7 +88,6 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, msg): irrelevant=b'a' * (sizeof_hdr - 1), # A too-small sniff, query bad_sniff=b'a' * sizeof_hdr, # Bad sniff, should fail ).items(): - for klass in img_klasses: if klass == expected_img_klass: # Class will load unless you pass a bad sniff, diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 74f05dc6e3..90424b7d34 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -1,15 +1,13 @@ """Testing imageclasses module """ -import warnings from os.path import dirname from os.path import join as pjoin import numpy as np -import pytest import nibabel as nib -from nibabel import imageclasses +from nibabel import imageclasses # noqa: F401 from nibabel.analyze import AnalyzeImage from nibabel.imageclasses import spatial_axes_first from nibabel.nifti1 import Nifti1Image diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index 2317a6397e..969b80b6fc 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -1,4 +1,5 @@ import pathlib +import unittest from unittest import mock import pytest diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index be4f0deb07..8f88bf802d 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -9,8 +9,6 @@ import bz2 import gzip -import types -import warnings from io import BytesIO from os.path import join as pjoin @@ -19,12 +17,10 @@ from numpy.testing import assert_array_equal from .. import Nifti1Image, load, minc1 -from ..deprecated import ModuleProxy -from ..deprecator import ExpiredDeprecationError from ..externals.netcdf import netcdf_file from ..minc1 import Minc1File, Minc1Image, MincHeader from ..optpkg import optional_package -from ..testing import assert_data_similar, clear_and_catch_warnings, data_path +from ..testing import assert_data_similar, data_path from ..tmpdirs import InTemporaryDirectory from . import test_spatialimages as tsi from .test_fileslice import slicer_samples diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index e76cb05ce7..7ab29edfde 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -129,5 +129,5 @@ def test_bad_diminfo(): # File has a bad spacing field 'xspace' when it should be # `irregular`, `regular__` or absent (default to regular__). # We interpret an invalid spacing as absent, but warn. 
- with pytest.warns(UserWarning) as w: + with pytest.warns(UserWarning) as _: Minc2Image.from_filename(fname) diff --git a/nibabel/tests/test_nibabel_data.py b/nibabel/tests/test_nibabel_data.py index 1687589549..0c7116e9a0 100644 --- a/nibabel/tests/test_nibabel_data.py +++ b/nibabel/tests/test_nibabel_data.py @@ -2,9 +2,8 @@ """ import os -from os.path import dirname, isdir +from os.path import dirname, isdir, realpath from os.path import join as pjoin -from os.path import realpath from . import nibabel_data as nibd diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index c7c4d1d84b..a5b9427bc4 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -731,7 +731,6 @@ def unshear_44(affine): class TestNifti1SingleHeader(TestNifti1PairHeader): - header_class = Nifti1Header def test_empty(self): diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index 742ef148bf..a25e23b49d 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -13,7 +13,7 @@ from numpy.testing import assert_array_equal from .. import nifti2 -from ..nifti1 import Nifti1Extension, Nifti1Extensions, Nifti1Header, Nifti1PairHeader +from ..nifti1 import Nifti1Extension, Nifti1Header, Nifti1PairHeader from ..nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair, Nifti2PairHeader from ..testing import data_path from . import test_nifti1 as tn1 diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index a228e66135..5c6a1643cc 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -19,7 +19,6 @@ import pytest from packaging.version import Version -from ..deprecator import ExpiredDeprecationError from ..openers import HAVE_INDEXED_GZIP, BZ2File, DeterministicGzipFile, ImageOpener, Opener from ..optpkg import optional_package from ..tmpdirs import InTemporaryDirectory diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 0094711e79..7e4a33e29f 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Testing for orientations module""" -import warnings import numpy as np import pytest @@ -185,7 +184,6 @@ def test_apply(): apply_orientation(a[:, :, 1], ornt) with pytest.raises(OrientationError): apply_orientation(a, [[0, 1], [np.nan, np.nan], [2, 1]]) - shape = np.array(a.shape) for ornt in ALL_ORNTS: t_arr = apply_orientation(a, ornt) assert_array_equal(a.shape, np.array(t_arr.shape)[np.array(ornt)[:, 0]]) diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 6035d47f8d..980a2f403f 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -285,8 +285,8 @@ def test_affine_regression(): # Test against checked affines from previous runs # Checked against Michael's data using some GUI tools # Data at http://psydata.ovgu.de/philips_achieva_testfiles/conversion2 - for basename, exp_affine in PREVIOUS_AFFINES.items(): - fname = pjoin(DATA_PATH, basename + '.PAR') + for basename_affine, exp_affine in PREVIOUS_AFFINES.items(): + fname = pjoin(DATA_PATH, basename_affine + '.PAR') with open(fname) as fobj: hdr = PARRECHeader.from_fileobj(fobj) assert_almost_equal(hdr.get_affine(), exp_affine) @@ -884,7 +884,6 @@ def test_dualTR(): def test_ADC_map(): # test reading an apparent diffusion coefficient map with open(ADC_PAR) as fobj: - # two truncation warnings expected because general_info indicates: # 
1.) multiple directions # 2.) multiple b-values diff --git a/nibabel/tests/test_parrec_data.py b/nibabel/tests/test_parrec_data.py index a437fafeda..2a52d97250 100644 --- a/nibabel/tests/test_parrec_data.py +++ b/nibabel/tests/test_parrec_data.py @@ -3,12 +3,10 @@ import unittest from glob import glob -from os.path import basename, exists +from os.path import basename, exists, splitext from os.path import join as pjoin -from os.path import splitext import numpy as np -import pytest from numpy.testing import assert_almost_equal from .. import load as top_load diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index dfe18c975a..1422bb3351 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -15,7 +15,7 @@ def test_pkg_info(): - nibabel.pkg_info.get_pkg_info - nibabel.pkg_info.pkg_commit_hash """ - info = nib.get_info() + _ = nib.get_info() def test_version(): diff --git a/nibabel/tests/test_pointset.py b/nibabel/tests/test_pointset.py index fb9a7c5c81..f4f0e4361b 100644 --- a/nibabel/tests/test_pointset.py +++ b/nibabel/tests/test_pointset.py @@ -1,15 +1,12 @@ from math import prod from pathlib import Path -from unittest import skipUnless import numpy as np import pytest from nibabel import pointset as ps from nibabel.affines import apply_affine -from nibabel.arrayproxy import ArrayProxy from nibabel.fileslice import strided_scalar -from nibabel.onetime import auto_attr from nibabel.optpkg import optional_package from nibabel.spatialimages import SpatialImage from nibabel.tests.nibabel_data import get_nibabel_data diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index fff7c5e040..ec882dd0b3 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -112,7 +112,6 @@ def test_fillpositive_simulated_error(dtype): # Permit 1 epsilon per value (default, but make explicit here) w2_thresh = 3 * np.finfo(dtype).eps - pos_error = neg_error = False for _ in range(50): xyz = norm(gen_vec(dtype)) @@ -186,12 +185,6 @@ def test_inverse(M, q): assert np.allclose(iM, iqM) -def test_eye(): - qi = nq.eye() - assert np.all([1, 0, 0, 0] == qi) - assert np.allclose(nq.quat2mat(qi), np.eye(3)) - - @pytest.mark.parametrize('vec', np.eye(3)) @pytest.mark.parametrize('M, q', eg_pairs) def test_qrotate(vec, M, q): diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 772d395fd4..7a56f3fb8b 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -1,4 +1,3 @@ -import unittest from unittest import mock import pytest diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index cc4bb468ad..455a994ae1 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -11,9 +11,8 @@ import sys import unittest from glob import glob -from os.path import abspath, basename, dirname, exists +from os.path import abspath, basename, dirname, exists, splitext from os.path import join as pjoin -from os.path import splitext import numpy as np import pytest @@ -197,7 +196,7 @@ def test_help(): # needs special treatment since depends on fuse module which # might not be available. 
try: - import fuse + import fuse # noqa: F401 except Exception: continue # do not test this one code, stdout, stderr = run_command([cmd, '--help']) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 7157d5c459..a5cab9e751 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -399,8 +399,7 @@ def test_slicer(self): img_klass = self.image_class in_data_template = np.arange(240, dtype=np.int16) base_affine = np.eye(4) - t_axis = None - for dshape in ((4, 5, 6, 2), (8, 5, 6)): # Time series # Volume + for dshape in ((4, 5, 6, 2), (8, 5, 6)): # Time series # Volume in_data = in_data_template.copy().reshape(dshape) img = img_klass(in_data, base_affine.copy()) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index dee3ea3554..1ca1fb9b97 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -114,7 +114,7 @@ def test_warn_error(): with error_warnings(): with pytest.raises(UserWarning): warnings.warn('A test') - with error_warnings() as w: # w not used for anything + with error_warnings() as _: with pytest.raises(UserWarning): warnings.warn('A test') assert n_warns == len(warnings.filters) @@ -134,7 +134,7 @@ def test_warn_ignore(): with suppress_warnings(): warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) - with suppress_warnings() as w: # w not used + with suppress_warnings() as _: warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) assert n_warns == len(warnings.filters) diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 10b4b3f22c..e18fb0210a 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -435,15 +435,6 @@ def test_copy(self): self._set_something_into_hdr(hdr2) assert hdr == hdr2 - def test_copy(self): - hdr = self.header_class() - hdr2 = hdr.copy() - assert hdr == hdr2 - self._set_something_into_hdr(hdr) - assert hdr != hdr2 - self._set_something_into_hdr(hdr2) - assert hdr == hdr2 - def test_checks(self): # Test header checks hdr_t = self.header_class() diff --git a/tools/make_tarball.py b/tools/make_tarball.py index 3cdad40d0b..b49a1f276a 100755 --- a/tools/make_tarball.py +++ b/tools/make_tarball.py @@ -5,7 +5,7 @@ import os import commands -from toollib import * +from toollib import c, cd tag = commands.getoutput('git describe') base_name = f'nibabel-{tag}' diff --git a/tools/markdown_release_notes.py b/tools/markdown_release_notes.py index 66e7876036..73bdbf7752 100644 --- a/tools/markdown_release_notes.py +++ b/tools/markdown_release_notes.py @@ -27,7 +27,7 @@ def main(): if in_release_notes: break in_release_notes = match.group(1) == version - next(f) # Skip the underline + next(f) # Skip the underline continue if in_release_notes: diff --git a/tools/mpkg_wrapper.py b/tools/mpkg_wrapper.py index 0a96156e4d..f5f059b28d 100644 --- a/tools/mpkg_wrapper.py +++ b/tools/mpkg_wrapper.py @@ -24,7 +24,7 @@ def main(): g = dict(globals()) g['__file__'] = sys.argv[0] g['__name__'] = '__main__' - execfile(sys.argv[0], g, g) + exec(open(sys.argv[0]).read(), g, g) if __name__ == '__main__': From 04dd1f4fd1a7491c91d1c3c1dfadeac8ade5aeaa Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 5 Mar 2024 17:24:14 +0100 Subject: [PATCH 536/702] =?UTF-8?q?MNT:=20ruff=200.1.9=20=E2=86=92=200.3.0?= MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .pre-commit-config.yaml | 2 +- tox.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ef2d891fbd..d35d287579 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ repos: - id: check-merge-conflict - id: check-vcs-permalinks - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.9 + rev: v0.3.0 hooks: - id: ruff args: [--fix, --show-fix, --exit-non-zero-on-fix] diff --git a/tox.ini b/tox.ini index 4e9b220ce8..53860445aa 100644 --- a/tox.ini +++ b/tox.ini @@ -139,7 +139,7 @@ commands = description = Check our style guide labels = check deps = - ruff>=0.1.9 + ruff>=0.3.0 skip_install = true commands = ruff --diff nibabel From 3ee9480d356198167c9c45854ecc489a7c186416 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 5 Mar 2024 16:51:02 +0100 Subject: [PATCH 537/702] MNT: run `ruff check --fix` and `ruff format` Also fix remaining issues manually. --- nibabel/_compression.py | 1 + nibabel/affines.py | 1 + nibabel/analyze.py | 1 + nibabel/arrayproxy.py | 15 +++++++-------- nibabel/arraywriters.py | 1 + nibabel/benchmarks/butils.py | 3 +-- nibabel/brikhead.py | 1 + nibabel/casting.py | 1 + nibabel/cifti2/cifti2.py | 1 + nibabel/cifti2/cifti2_axes.py | 1 + nibabel/cifti2/tests/test_cifti2.py | 4 ++-- nibabel/cifti2/tests/test_new_cifti2.py | 1 + nibabel/cmdline/__init__.py | 3 +-- nibabel/cmdline/diff.py | 6 +++--- nibabel/cmdline/parrec2nii.py | 3 +-- nibabel/cmdline/tck2trk.py | 1 + nibabel/cmdline/tests/test_parrec2nii.py | 4 ++-- nibabel/cmdline/utils.py | 1 - nibabel/data.py | 1 + nibabel/dataobj_images.py | 1 + nibabel/deprecated.py | 1 + nibabel/deprecator.py | 1 + nibabel/dft.py | 2 -- nibabel/ecat.py | 1 + nibabel/environment.py | 1 + nibabel/eulerangles.py | 1 + nibabel/filebasedimages.py | 1 + nibabel/fileholders.py | 1 + nibabel/filename_parser.py | 1 + nibabel/fileslice.py | 1 + nibabel/freesurfer/__init__.py | 3 +-- nibabel/freesurfer/io.py | 3 +-- nibabel/freesurfer/mghformat.py | 1 + nibabel/funcs.py | 1 + nibabel/gifti/gifti.py | 1 + nibabel/gifti/tests/test_gifti.py | 4 ++-- nibabel/imageclasses.py | 1 + nibabel/imageglobals.py | 1 + nibabel/imagestats.py | 1 + nibabel/loadsave.py | 1 + nibabel/minc1.py | 1 + nibabel/minc2.py | 1 + nibabel/nicom/__init__.py | 1 + nibabel/nicom/ascconv.py | 1 + nibabel/nicom/csareader.py | 4 ++-- nibabel/nicom/dwiparams.py | 1 + nibabel/nicom/tests/test_ascconv.py | 3 +-- nibabel/nicom/tests/test_csareader.py | 4 ++-- nibabel/nicom/tests/test_dicomreaders.py | 3 +-- nibabel/nicom/tests/test_dicomwrappers.py | 3 +-- nibabel/nicom/tests/test_dwiparams.py | 3 +-- nibabel/nicom/tests/test_structreader.py | 4 ++-- nibabel/nicom/tests/test_utils.py | 4 ++-- nibabel/nicom/utils.py | 3 +-- nibabel/nifti1.py | 1 + nibabel/nifti2.py | 1 + nibabel/onetime.py | 9 +++++---- nibabel/openers.py | 7 +++---- nibabel/optpkg.py | 1 + nibabel/orientations.py | 1 + nibabel/parrec.py | 1 + nibabel/pointset.py | 9 +++++---- nibabel/processing.py | 1 + nibabel/pydicom_compat.py | 1 + nibabel/quaternions.py | 1 + nibabel/rstutils.py | 1 + nibabel/spaces.py | 1 + nibabel/spatialimages.py | 16 ++++++---------- nibabel/spm2analyze.py | 1 + nibabel/spm99analyze.py | 1 + nibabel/streamlines/__init__.py | 3 +-- nibabel/streamlines/header.py | 3 +-- .../streamlines/tests/test_tractogram_file.py | 3 +-- 
nibabel/streamlines/tractogram_file.py | 4 ++-- nibabel/testing/helpers.py | 4 ++-- nibabel/testing/np_features.py | 4 ++-- nibabel/tests/data/check_parrec_reslice.py | 1 + nibabel/tests/data/gen_standard.py | 1 + nibabel/tests/nibabel_data.py | 3 +-- nibabel/tests/scriptrunner.py | 1 + nibabel/tests/test_api_validators.py | 4 ++-- nibabel/tests/test_arrayproxy.py | 3 +-- nibabel/tests/test_batteryrunners.py | 3 +-- nibabel/tests/test_casting.py | 4 ++-- nibabel/tests/test_data.py | 3 ++- nibabel/tests/test_dataobj_images.py | 3 +-- nibabel/tests/test_deprecated.py | 3 +-- nibabel/tests/test_deprecator.py | 3 +-- nibabel/tests/test_dft.py | 3 +-- nibabel/tests/test_diff.py | 3 +-- nibabel/tests/test_ecat_data.py | 3 +-- nibabel/tests/test_environment.py | 3 +-- nibabel/tests/test_filebasedimages.py | 3 +-- nibabel/tests/test_fileholders.py | 3 +-- nibabel/tests/test_filename_parser.py | 1 + nibabel/tests/test_files_interface.py | 3 +-- nibabel/tests/test_fileslice.py | 1 - nibabel/tests/test_fileutils.py | 4 +--- nibabel/tests/test_floating.py | 4 ++-- nibabel/tests/test_image_api.py | 1 - nibabel/tests/test_image_load_save.py | 1 + nibabel/tests/test_imageclasses.py | 3 +-- nibabel/tests/test_imageglobals.py | 4 ++-- nibabel/tests/test_loadsave.py | 3 +-- nibabel/tests/test_minc2_data.py | 3 +-- nibabel/tests/test_mriutils.py | 4 +--- nibabel/tests/test_nibabel_data.py | 3 +-- nibabel/tests/test_nifti1.py | 1 + nibabel/tests/test_nifti2.py | 1 + nibabel/tests/test_onetime.py | 2 -- nibabel/tests/test_openers.py | 1 + nibabel/tests/test_optpkg.py | 3 +-- nibabel/tests/test_orientations.py | 1 - nibabel/tests/test_parrec.py | 3 +-- nibabel/tests/test_parrec_data.py | 3 +-- nibabel/tests/test_pkg_info.py | 3 +-- nibabel/tests/test_processing.py | 3 +-- nibabel/tests/test_rstutils.py | 3 +-- nibabel/tests/test_spaces.py | 3 +-- nibabel/tests/test_spatialimages.py | 3 +-- nibabel/tests/test_testing.py | 3 +-- nibabel/tests/test_tripwire.py | 3 +-- nibabel/tests/test_wrapstruct.py | 1 + nibabel/tmpdirs.py | 1 + nibabel/tripwire.py | 1 + nibabel/viewers.py | 1 + nibabel/volumeutils.py | 9 +++++---- nibabel/wrapstruct.py | 1 + nibabel/xmlutils.py | 1 + tox.ini | 2 +- 130 files changed, 166 insertions(+), 161 deletions(-) diff --git a/nibabel/_compression.py b/nibabel/_compression.py index b7cfc8f49f..eeb66f36b4 100644 --- a/nibabel/_compression.py +++ b/nibabel/_compression.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Constants and types for dealing transparently with compression""" + from __future__ import annotations import bz2 diff --git a/nibabel/affines.py b/nibabel/affines.py index 1478fd2dca..4b6001dec0 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Utility routines for working with points and affine transforms""" + from functools import reduce import numpy as np diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 20fdac055a..189f2e0a1a 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -81,6 +81,7 @@ can be loaded with and without a default flip, so the saved zoom will not constrain the affine. 
""" + from __future__ import annotations import numpy as np diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 57d8aa0f8b..4bf5bd4700 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -25,6 +25,7 @@ See :mod:`nibabel.tests.test_proxy_api` for proxy API conformance checks. """ + from __future__ import annotations import typing as ty @@ -74,21 +75,19 @@ class ArrayLike(ty.Protocol): shape: tuple[int, ...] @property - def ndim(self) -> int: - ... # pragma: no cover + def ndim(self) -> int: ... # pragma: no cover # If no dtype is passed, any dtype might be returned, depending on the array-like @ty.overload - def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: - ... # pragma: no cover + def __array__( + self, dtype: None = ..., / + ) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: ... # pragma: no cover # Any dtype might be passed, and *that* dtype must be returned @ty.overload - def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: - ... # pragma: no cover + def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... # pragma: no cover - def __getitem__(self, key, /) -> npt.NDArray: - ... # pragma: no cover + def __getitem__(self, key, /) -> npt.NDArray: ... # pragma: no cover class ArrayProxy(ArrayLike): diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 751eb6ad1f..1f55263fc3 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -28,6 +28,7 @@ def __init__(self, array, out_dtype=None) something else to make sense of conversions between float and int, or between larger ints and smaller. """ + import numpy as np from .casting import best_float, floor_exact, int_abs, shared_range, type_info diff --git a/nibabel/benchmarks/butils.py b/nibabel/benchmarks/butils.py index 01d6931eba..13c255d1c1 100644 --- a/nibabel/benchmarks/butils.py +++ b/nibabel/benchmarks/butils.py @@ -1,5 +1,4 @@ -"""Benchmarking utilities -""" +"""Benchmarking utilities""" from .. import get_info diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 6694ff08a5..3a3cfd0871 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -26,6 +26,7 @@ am aware) always be >= 1. This permits sub-brick indexing common in AFNI programs (e.g., example4d+orig'[0]'). """ + import os import re from copy import deepcopy diff --git a/nibabel/casting.py b/nibabel/casting.py index 77da57e406..31e27d0e8c 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -3,6 +3,7 @@ Most routines work round some numpy oddities in floating point precision and casting. Others work round numpy casting to and from python ints """ + from __future__ import annotations import warnings diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 452bceb7ea..cb2e0cfaf4 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -16,6 +16,7 @@ http://www.nitrc.org/projects/cifti """ + import re from collections import OrderedDict from collections.abc import Iterable, MutableMapping, MutableSequence diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 6443a34fb5..af7c63beaa 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -118,6 +118,7 @@ ... 
bm_cortex))) """ + import abc from operator import xor diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index d7fd0a0eda..895b8f9597 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -1,5 +1,5 @@ -"""Testing CIFTI-2 objects -""" +"""Testing CIFTI-2 objects""" + import collections from xml.etree import ElementTree diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index 0f90b822da..4cf5502ad7 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -6,6 +6,7 @@ These functions are used in the tests to generate most CIFTI file types from scratch. """ + import numpy as np import pytest diff --git a/nibabel/cmdline/__init__.py b/nibabel/cmdline/__init__.py index 6478e5f261..f0744521bc 100644 --- a/nibabel/cmdline/__init__.py +++ b/nibabel/cmdline/__init__.py @@ -6,5 +6,4 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Functionality to be exposed in the command line -""" +"""Functionality to be exposed in the command line""" diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index d20a105e76..1231a778f4 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -246,9 +246,9 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): sub_thr = rel_diff <= max_rel # Since we operated on sub-selected values already, we need # to plug them back in - candidates[ - tuple(indexes[sub_thr] for indexes in np.where(candidates)) - ] = False + candidates[tuple(indexes[sub_thr] for indexes in np.where(candidates))] = ( + False + ) max_rel_diff = np.max(rel_diff) else: max_rel_diff = 0 diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index 9340626395..0ae6b3fb40 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -1,5 +1,4 @@ -"""Code for PAR/REC to NIfTI converter command -""" +"""Code for PAR/REC to NIfTI converter command""" import csv import os diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py index d5d29ba430..a73540c446 100644 --- a/nibabel/cmdline/tck2trk.py +++ b/nibabel/cmdline/tck2trk.py @@ -1,6 +1,7 @@ """ Convert tractograms (TCK -> TRK). 
""" + import argparse import os diff --git a/nibabel/cmdline/tests/test_parrec2nii.py b/nibabel/cmdline/tests/test_parrec2nii.py index 017df9813a..ccedafb74b 100644 --- a/nibabel/cmdline/tests/test_parrec2nii.py +++ b/nibabel/cmdline/tests/test_parrec2nii.py @@ -1,5 +1,5 @@ -"""Tests for the parrec2nii exe code -""" +"""Tests for the parrec2nii exe code""" + from os.path import basename, isfile, join from unittest.mock import MagicMock, Mock, patch diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index 2149235704..d89cc5c964 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -10,7 +10,6 @@ Helper utilities to be used in cmdline applications """ - # global verbosity switch import re from io import StringIO diff --git a/nibabel/data.py b/nibabel/data.py index 7e2fe2af70..c49580d09b 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Utilities to find files from NIPY data packages""" + import configparser import glob import os diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index eaf341271e..a2ee691a16 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -7,6 +7,7 @@ * returns an array from ``numpy.asanyarray(obj)``; * has an attribute or property ``shape``. """ + from __future__ import annotations import typing as ty diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 092370106e..b8c378cee3 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -1,4 +1,5 @@ """Module to help with deprecating objects and classes""" + from __future__ import annotations import typing as ty diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 779fdb462d..b9912534d2 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -1,4 +1,5 @@ """Class for recording and reporting deprecations""" + from __future__ import annotations import functools diff --git a/nibabel/dft.py b/nibabel/dft.py index aeb8accbb5..d9e3359998 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -9,7 +9,6 @@ # Copyright (C) 2011 Christian Haselgrove """DICOM filesystem tools""" - import contextlib import getpass import logging @@ -44,7 +43,6 @@ class VolumeError(DFTError): class InstanceStackError(DFTError): - """bad series of instance numbers""" def __init__(self, series, i, si): diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 85de9184b5..03a4c72b98 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -42,6 +42,7 @@ GPL and some of the header files are adapted from CTI files (called CTI code below). It's not clear what the licenses are for these files. """ + import warnings from numbers import Integral diff --git a/nibabel/environment.py b/nibabel/environment.py index 09aaa6320f..a828ccb865 100644 --- a/nibabel/environment.py +++ b/nibabel/environment.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Settings from the system environment relevant to NIPY""" + import os from os.path import join as pjoin diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index 13dc059644..b1d187e8c1 100644 --- a/nibabel/eulerangles.py +++ b/nibabel/eulerangles.py @@ -82,6 +82,7 @@ ``y``, followed by rotation around ``x``, is known (confusingly) as "xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles. 
""" + import math from functools import reduce diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 42760cccdf..4e0d06b64c 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Common interface for any image format--volume or surface, binary or xml""" + from __future__ import annotations import io diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index a27715350d..3db4c62a9e 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Fileholder class""" + from __future__ import annotations import io diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index 92a2f4b1f5..bdbca6a383 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Create filename pairs, triplets etc, with expected extensions""" + from __future__ import annotations import os diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index fe7d6bba54..816f1cdaf6 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -1,4 +1,5 @@ """Utilities for getting array slices out of file-like objects""" + import operator from functools import reduce from mmap import mmap diff --git a/nibabel/freesurfer/__init__.py b/nibabel/freesurfer/__init__.py index 48922285c9..aa76eb2e89 100644 --- a/nibabel/freesurfer/__init__.py +++ b/nibabel/freesurfer/__init__.py @@ -1,5 +1,4 @@ -"""Reading functions for freesurfer files -""" +"""Reading functions for freesurfer files""" # ruff: noqa: F401 diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index b4d6ef2a3a..74bc05fc31 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -1,5 +1,4 @@ -"""Read / write FreeSurfer geometry, morphometry, label, annotation formats -""" +"""Read / write FreeSurfer geometry, morphometry, label, annotation formats""" import getpass import time diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 5dd2660342..93abf7b407 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -10,6 +10,7 @@ Author: Krish Subramaniam """ + from os.path import splitext import numpy as np diff --git a/nibabel/funcs.py b/nibabel/funcs.py index f83ed68709..cda4a5d2ed 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Processor functions for images""" + import numpy as np from .loadsave import load diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 7aba877309..7c5c3c4fb0 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -11,6 +11,7 @@ The Gifti specification was (at time of writing) available as a PDF download from http://www.nitrc.org/projects/gifti/ """ + from __future__ import annotations import base64 diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 7e4c223971..f27546afe7 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -1,5 +1,5 @@ -"""Testing gifti objects -""" +"""Testing gifti objects""" + import itertools import sys from io import BytesIO diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index b36131ed94..20cf1cac9c 100644 --- a/nibabel/imageclasses.py +++ 
b/nibabel/imageclasses.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Define supported image classes and names""" + from __future__ import annotations from .analyze import AnalyzeImage diff --git a/nibabel/imageglobals.py b/nibabel/imageglobals.py index 551719a7ee..81a1742809 100644 --- a/nibabel/imageglobals.py +++ b/nibabel/imageglobals.py @@ -23,6 +23,7 @@ Use ``logger.level = 1`` to see all messages. """ + import logging error_level = 40 diff --git a/nibabel/imagestats.py b/nibabel/imagestats.py index 38dc9d3f16..36fbddee0e 100644 --- a/nibabel/imagestats.py +++ b/nibabel/imagestats.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Functions for computing image statistics""" + import numpy as np from nibabel.imageclasses import spatial_axes_first diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 463a687975..159d9bae82 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -8,6 +8,7 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # module imports """Utilities to load and save image objects""" + from __future__ import annotations import os diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 5f8422bc23..d0b9fd5375 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read MINC1 format images""" + from __future__ import annotations from numbers import Integral diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 912b5d28ae..161be5c111 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -25,6 +25,7 @@ mincstats my_funny.mnc """ + import warnings import numpy as np diff --git a/nibabel/nicom/__init__.py b/nibabel/nicom/__init__.py index 3a389db172..d15e0846ff 100644 --- a/nibabel/nicom/__init__.py +++ b/nibabel/nicom/__init__.py @@ -19,6 +19,7 @@ dwiparams structreader """ + import warnings warnings.warn( diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index be6da9786c..0966de2a96 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -3,6 +3,7 @@ """ Parse the "ASCCONV" meta data format found in a variety of Siemens MR files. """ + import ast import re from collections import OrderedDict diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 40f3f852d9..df379e0be8 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -1,5 +1,5 @@ -"""CSA header reader from SPM spec -""" +"""CSA header reader from SPM spec""" + import numpy as np from .structreader import Unpacker diff --git a/nibabel/nicom/dwiparams.py b/nibabel/nicom/dwiparams.py index cb0e501202..5930e96f91 100644 --- a/nibabel/nicom/dwiparams.py +++ b/nibabel/nicom/dwiparams.py @@ -18,6 +18,7 @@ B ~ (q_est . 
q_est.T) / norm(q_est) """ + import numpy as np import numpy.linalg as npl diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index cf40298c56..afe5f05e13 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -1,5 +1,4 @@ -"""Testing Siemens "ASCCONV" parser -""" +"""Testing Siemens "ASCCONV" parser""" from collections import OrderedDict from os.path import dirname diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index ddb46a942a..f31f4a3935 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -1,5 +1,5 @@ -"""Testing Siemens CSA header reader -""" +"""Testing Siemens CSA header reader""" + import gzip from copy import deepcopy from os.path import join as pjoin diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index 17ea7430f2..d508343be1 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -1,5 +1,4 @@ -"""Testing reading DICOM files -""" +"""Testing reading DICOM files""" from os.path import join as pjoin diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index fa2dfc07c6..e96607df9e 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -1,5 +1,4 @@ -"""Testing DICOM wrappers -""" +"""Testing DICOM wrappers""" import gzip from copy import copy diff --git a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py index 6e98b4af61..559c0a2143 100644 --- a/nibabel/nicom/tests/test_dwiparams.py +++ b/nibabel/nicom/tests/test_dwiparams.py @@ -1,5 +1,4 @@ -"""Testing diffusion parameter processing -""" +"""Testing diffusion parameter processing""" import numpy as np import pytest diff --git a/nibabel/nicom/tests/test_structreader.py b/nibabel/nicom/tests/test_structreader.py index 2d37bbc3ed..ccd2dd4f85 100644 --- a/nibabel/nicom/tests/test_structreader.py +++ b/nibabel/nicom/tests/test_structreader.py @@ -1,5 +1,5 @@ -"""Testing Siemens CSA header reader -""" +"""Testing Siemens CSA header reader""" + import struct import sys diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py index ea3b999fad..4f0d7e68d5 100644 --- a/nibabel/nicom/tests/test_utils.py +++ b/nibabel/nicom/tests/test_utils.py @@ -1,5 +1,5 @@ -"""Testing nicom.utils module -""" +"""Testing nicom.utils module""" + import re from nibabel.optpkg import optional_package diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py index 617ff2a28a..24f4afc2fe 100644 --- a/nibabel/nicom/utils.py +++ b/nibabel/nicom/utils.py @@ -1,5 +1,4 @@ -"""Utilities for working with DICOM datasets -""" +"""Utilities for working with DICOM datasets""" def find_private_section(dcm_data, group_no, creator): diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 4cf1e52748..d07e54de18 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -10,6 +10,7 @@ NIfTI1 format defined at http://nifti.nimh.nih.gov/nifti-1/ """ + from __future__ import annotations import warnings diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 8d9b81e1f9..9c898b47ba 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -12,6 +12,7 @@ https://www.nitrc.org/forum/message.php?msg_id=3738 """ + import numpy as np from .analyze import AnalyzeHeader diff --git a/nibabel/onetime.py b/nibabel/onetime.py index e365e81f74..fa1b2f9927 100644 --- a/nibabel/onetime.py 
+++ b/nibabel/onetime.py @@ -18,6 +18,7 @@ [2] Python data model, https://docs.python.org/reference/datamodel.html """ + from __future__ import annotations import typing as ty @@ -136,12 +137,12 @@ def __init__(self, func: ty.Callable[[InstanceT], T]) -> None: @ty.overload def __get__( self, obj: None, objtype: type[InstanceT] | None = None - ) -> ty.Callable[[InstanceT], T]: - ... # pragma: no cover + ) -> ty.Callable[[InstanceT], T]: ... # pragma: no cover @ty.overload - def __get__(self, obj: InstanceT, objtype: type[InstanceT] | None = None) -> T: - ... # pragma: no cover + def __get__( + self, obj: InstanceT, objtype: type[InstanceT] | None = None + ) -> T: ... # pragma: no cover def __get__( self, obj: InstanceT | None, objtype: type[InstanceT] | None = None diff --git a/nibabel/openers.py b/nibabel/openers.py index d69412fb85..f84ccb7069 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Context manager openers for various fileobject types""" + from __future__ import annotations import gzip @@ -35,11 +36,9 @@ @ty.runtime_checkable class Fileish(ty.Protocol): - def read(self, size: int = -1, /) -> bytes: - ... # pragma: no cover + def read(self, size: int = -1, /) -> bytes: ... # pragma: no cover - def write(self, b: bytes, /) -> int | None: - ... # pragma: no cover + def write(self, b: bytes, /) -> int | None: ... # pragma: no cover class DeterministicGzipFile(gzip.GzipFile): diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index b59a89bb35..bfe6a629cc 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -1,4 +1,5 @@ """Routines to support optional packages""" + from __future__ import annotations import typing as ty diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 075cbd4ffd..7265bf56f3 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for calculating and applying affine orientations""" + import numpy as np import numpy.linalg as npl diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 3a8a6030de..d04f683d1d 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -121,6 +121,7 @@ utility via the option "--strict-sort". The dimension info can be exported to a CSV file by adding the option "--volume-info". """ + import re import warnings from collections import OrderedDict diff --git a/nibabel/pointset.py b/nibabel/pointset.py index 58fca148a8..e39a4d4187 100644 --- a/nibabel/pointset.py +++ b/nibabel/pointset.py @@ -17,6 +17,7 @@ adjacent points to be identified. A *triangular mesh* in particular uses triplets of adjacent vertices to describe faces. """ + from __future__ import annotations import math @@ -40,12 +41,12 @@ class CoordinateArray(ty.Protocol): shape: tuple[int, int] @ty.overload - def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: - ... # pragma: no cover + def __array__( + self, dtype: None = ..., / + ) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: ... # pragma: no cover @ty.overload - def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: - ... # pragma: no cover + def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... 
# pragma: no cover @dataclass diff --git a/nibabel/processing.py b/nibabel/processing.py index d634ce7086..6027575d47 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -16,6 +16,7 @@ Smoothing and resampling routines need scipy. """ + import numpy as np import numpy.linalg as npl diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index d61c880117..76423b40a8 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -19,6 +19,7 @@ A deprecated copy is available here for backward compatibility. """ + from __future__ import annotations import warnings diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index d2fc3ac4ca..77cf8d2d3f 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -25,6 +25,7 @@ >>> vec = np.array([1, 2, 3]).reshape((3,1)) # column vector >>> tvec = np.dot(M, vec) """ + import math import numpy as np diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py index 625a2af477..cb40633e54 100644 --- a/nibabel/rstutils.py +++ b/nibabel/rstutils.py @@ -2,6 +2,7 @@ * Make ReST table given array of values """ + import numpy as np diff --git a/nibabel/spaces.py b/nibabel/spaces.py index e5b87171df..d06a39b0ed 100644 --- a/nibabel/spaces.py +++ b/nibabel/spaces.py @@ -19,6 +19,7 @@ mapping), or * a length 2 sequence with the same information (shape, affine). """ + from itertools import product import numpy as np diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index bcc4336f73..185694cd72 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -129,6 +129,7 @@ >>> np.all(img3.get_fdata(dtype=np.float32) == data) True """ + from __future__ import annotations import io @@ -161,23 +162,18 @@ class HasDtype(ty.Protocol): - def get_data_dtype(self) -> np.dtype: - ... # pragma: no cover + def get_data_dtype(self) -> np.dtype: ... # pragma: no cover - def set_data_dtype(self, dtype: npt.DTypeLike) -> None: - ... # pragma: no cover + def set_data_dtype(self, dtype: npt.DTypeLike) -> None: ... # pragma: no cover @ty.runtime_checkable class SpatialProtocol(ty.Protocol): - def get_data_dtype(self) -> np.dtype: - ... # pragma: no cover + def get_data_dtype(self) -> np.dtype: ... # pragma: no cover - def get_data_shape(self) -> ty.Tuple[int, ...]: - ... # pragma: no cover + def get_data_shape(self) -> ty.Tuple[int, ...]: ... # pragma: no cover - def get_zooms(self) -> ty.Tuple[float, ...]: - ... # pragma: no cover + def get_zooms(self) -> ty.Tuple[float, ...]: ... # pragma: no cover class HeaderDataError(Exception): diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index f63785807c..9c4c544cf5 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read / write access to SPM2 version of analyze image format""" + import numpy as np from . 
import spm99analyze as spm99 # module import diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 3465c57190..7be6c240d4 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read / write access to SPM99 version of analyze image format""" + import warnings from io import BytesIO diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index f3cbd2da59..24a7e01469 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -1,5 +1,4 @@ -"""Multiformat-capable streamline format read / write interface -""" +"""Multiformat-capable streamline format read / write interface""" # ruff: noqa: F401 import os diff --git a/nibabel/streamlines/header.py b/nibabel/streamlines/header.py index 2aed10c62c..a3b52b0747 100644 --- a/nibabel/streamlines/header.py +++ b/nibabel/streamlines/header.py @@ -1,5 +1,4 @@ -"""Field class defining common header fields in tractogram files -""" +"""Field class defining common header fields in tractogram files""" class Field: diff --git a/nibabel/streamlines/tests/test_tractogram_file.py b/nibabel/streamlines/tests/test_tractogram_file.py index 71e2326ecf..6f764009f1 100644 --- a/nibabel/streamlines/tests/test_tractogram_file.py +++ b/nibabel/streamlines/tests/test_tractogram_file.py @@ -1,5 +1,4 @@ -"""Test tractogramFile base class -""" +"""Test tractogramFile base class""" import pytest diff --git a/nibabel/streamlines/tractogram_file.py b/nibabel/streamlines/tractogram_file.py index 2cec1ea9cb..557261e9a0 100644 --- a/nibabel/streamlines/tractogram_file.py +++ b/nibabel/streamlines/tractogram_file.py @@ -1,5 +1,5 @@ -"""Define abstract interface for Tractogram file classes -""" +"""Define abstract interface for Tractogram file classes""" + from abc import ABC, abstractmethod from .header import Field diff --git a/nibabel/testing/helpers.py b/nibabel/testing/helpers.py index 2f25a354d7..ae859d6572 100644 --- a/nibabel/testing/helpers.py +++ b/nibabel/testing/helpers.py @@ -1,5 +1,5 @@ -"""Helper functions for tests -""" +"""Helper functions for tests""" + from io import BytesIO import numpy as np diff --git a/nibabel/testing/np_features.py b/nibabel/testing/np_features.py index eeb783900a..226df64845 100644 --- a/nibabel/testing/np_features.py +++ b/nibabel/testing/np_features.py @@ -1,5 +1,5 @@ -"""Look for changes in numpy behavior over versions -""" +"""Look for changes in numpy behavior over versions""" + from functools import lru_cache import numpy as np diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index 8ade7f539c..244b4c3a64 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -21,6 +21,7 @@ The *_cor_SENSE* image has a higher RMS because the back of the phantom is out of the field of view. 
""" + import glob import numpy as np diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index 598726fe74..7fd05d936e 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -5,6 +5,7 @@ * standard.trk """ + import numpy as np import nibabel as nib diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 1f89c9c1a1..5919eba925 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -1,5 +1,4 @@ -"""Functions / decorators for finding / requiring nibabel-data directory -""" +"""Functions / decorators for finding / requiring nibabel-data directory""" import unittest from os import environ, listdir diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 1e8b1fdda2..2f3de50791 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -12,6 +12,7 @@ assert_equal(code, 0) assert_equal(stdout, b'This script ran OK') """ + import os import sys from os.path import dirname, isdir, isfile, pathsep, realpath diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 1d21092eef..a4e787465a 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -1,5 +1,5 @@ -"""Metaclass and class for validating instance APIs -""" +"""Metaclass and class for validating instance APIs""" + import os import pytest diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index a207e4ed6d..a79f63bc72 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Tests for arrayproxy module -""" +"""Tests for arrayproxy module""" import contextlib import gzip diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index 84590452ea..5cae764c8b 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -6,8 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Tests for BatteryRunner and Report objects -""" +"""Tests for BatteryRunner and Report objects""" import logging from io import StringIO diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index f345952aac..d4cf81515a 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -1,5 +1,5 @@ -"""Test casting utilities -""" +"""Test casting utilities""" + import os from platform import machine diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index 3ccb4963ca..cca8d0ba81 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for data module""" + import os import sys import tempfile @@ -26,7 +27,7 @@ @pytest.fixture -def with_nimd_env(request, with_environment): +def with_nimd_env(request): DATA_FUNCS = {} DATA_FUNCS['home_dir_func'] = nibd.get_nipy_user_dir DATA_FUNCS['sys_dir_func'] = nibd.get_nipy_system_dir diff --git a/nibabel/tests/test_dataobj_images.py b/nibabel/tests/test_dataobj_images.py index a1d2dbc9f1..877e407812 100644 --- a/nibabel/tests/test_dataobj_images.py +++ b/nibabel/tests/test_dataobj_images.py @@ -1,5 +1,4 @@ -"""Testing dataobj_images module -""" +"""Testing dataobj_images module""" import numpy as np diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index 2576eca3d9..f1c3d517c9 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -1,5 +1,4 @@ -"""Testing `deprecated` module -""" +"""Testing `deprecated` module""" import warnings diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 833908af94..eedeec4852 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -1,5 +1,4 @@ -"""Testing deprecator module / Deprecator class -""" +"""Testing deprecator module / Deprecator class""" import sys import warnings diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index f756600fd3..654af98279 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -1,5 +1,4 @@ -"""Testing dft -""" +"""Testing dft""" import os import sqlite3 diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py index fee71d628b..798a7f7b30 100644 --- a/nibabel/tests/test_diff.py +++ b/nibabel/tests/test_diff.py @@ -1,7 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""Test diff -""" +"""Test diff""" from os.path import abspath, dirname from os.path import join as pjoin diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index 23485ae92b..427645b92a 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -6,8 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Test we can correctly import example ECAT files -""" +"""Test we can correctly import example ECAT files""" import os from os.path import join as pjoin diff --git a/nibabel/tests/test_environment.py b/nibabel/tests/test_environment.py index afb6d36f84..aa58d9b8e0 100644 --- a/nibabel/tests/test_environment.py +++ b/nibabel/tests/test_environment.py @@ -1,5 +1,4 @@ -"""Testing environment settings -""" +"""Testing environment settings""" import os from os import environ as env diff --git a/nibabel/tests/test_filebasedimages.py b/nibabel/tests/test_filebasedimages.py index 3aa1ae78c5..7d162c0917 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -1,5 +1,4 @@ -"""Testing filebasedimages module -""" +"""Testing filebasedimages module""" import warnings from itertools import product diff --git a/nibabel/tests/test_fileholders.py b/nibabel/tests/test_fileholders.py index 33b3f76e6f..83fe75aecc 100644 --- a/nibabel/tests/test_fileholders.py +++ b/nibabel/tests/test_fileholders.py @@ -1,5 +1,4 @@ -"""Testing fileholders -""" +"""Testing fileholders""" from io import BytesIO diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 5d352f72dd..4e53cb2e5d 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for filename container""" + import pathlib import pytest diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 52557d353d..07e394eca4 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing filesets - a draft -""" +"""Testing filesets - a draft""" from io import BytesIO diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index e9f65e45a2..355743b04e 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -1,6 +1,5 @@ """Test slicing of file-like objects""" - import time from functools import partial from io import BytesIO diff --git a/nibabel/tests/test_fileutils.py b/nibabel/tests/test_fileutils.py index 21c7676fce..bc202c6682 100644 --- a/nibabel/tests/test_fileutils.py +++ b/nibabel/tests/test_fileutils.py @@ -6,9 +6,7 @@ # copyright and license terms. # # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing fileutils module -""" - +"""Testing fileutils module""" import pytest diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index c2ccd44039..82c8e667a9 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -1,5 +1,5 @@ -"""Test floating point deconstructions and floor methods -""" +"""Test floating point deconstructions and floor methods""" + import sys import numpy as np diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 86c04985f8..5898762322 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -50,7 +50,6 @@ clear_and_catch_warnings, deprecated_to, expires, - nullcontext, ) from .. 
import ( diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 4e787f0d71..934698d9e6 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for loader function""" + import logging import pathlib import shutil diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 90424b7d34..7b3add6cd0 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -1,5 +1,4 @@ -"""Testing imageclasses module -""" +"""Testing imageclasses module""" from os.path import dirname from os.path import join as pjoin diff --git a/nibabel/tests/test_imageglobals.py b/nibabel/tests/test_imageglobals.py index ac043d192b..9de72e87c6 100644 --- a/nibabel/tests/test_imageglobals.py +++ b/nibabel/tests/test_imageglobals.py @@ -6,8 +6,8 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Tests for imageglobals module -""" +"""Tests for imageglobals module""" + from .. import imageglobals as igs diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 401ed04535..d039263bd1 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -1,5 +1,4 @@ -"""Testing loadsave module -""" +"""Testing loadsave module""" import pathlib import shutil diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index e96e716699..a5ea38a8a9 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Test we can correctly import example MINC2_PATH files -""" +"""Test we can correctly import example MINC2_PATH files""" import os from os.path import join as pjoin diff --git a/nibabel/tests/test_mriutils.py b/nibabel/tests/test_mriutils.py index 848579cee6..02b9da5482 100644 --- a/nibabel/tests/test_mriutils.py +++ b/nibabel/tests/test_mriutils.py @@ -6,9 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing mriutils module -""" - +"""Testing mriutils module""" import pytest from numpy.testing import assert_almost_equal diff --git a/nibabel/tests/test_nibabel_data.py b/nibabel/tests/test_nibabel_data.py index 0c7116e9a0..7e319ac3f5 100644 --- a/nibabel/tests/test_nibabel_data.py +++ b/nibabel/tests/test_nibabel_data.py @@ -1,5 +1,4 @@ -"""Tests for ``get_nibabel_data`` -""" +"""Tests for ``get_nibabel_data``""" import os from os.path import dirname, isdir, realpath diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index a5b9427bc4..5ee4fb3c15 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for nifti reading package""" + import os import struct import unittest diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index a25e23b49d..01d44c1595 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for nifti2 reading package""" + import os import numpy as np diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index b22a4ef3ec..4d72949271 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -1,5 +1,3 @@ -import pytest - from nibabel.onetime import auto_attr, setattr_on_read from nibabel.testing import deprecated_to, expires diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 5c6a1643cc..15290d5ef9 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test for openers module""" + import contextlib import hashlib import os diff --git a/nibabel/tests/test_optpkg.py b/nibabel/tests/test_optpkg.py index 7ffaa2f851..c243633a07 100644 --- a/nibabel/tests/test_optpkg.py +++ b/nibabel/tests/test_optpkg.py @@ -1,5 +1,4 @@ -"""Testing optpkg module -""" +"""Testing optpkg module""" import builtins import sys diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 7e4a33e29f..e7c32d7867 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Testing for orientations module""" - import numpy as np import pytest from numpy.testing import assert_array_equal diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 980a2f403f..a312c558a8 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -1,5 +1,4 @@ -"""Testing parrec module -""" +"""Testing parrec module""" from glob import glob from os.path import basename, dirname diff --git a/nibabel/tests/test_parrec_data.py b/nibabel/tests/test_parrec_data.py index 2a52d97250..02a1d5733a 100644 --- a/nibabel/tests/test_parrec_data.py +++ b/nibabel/tests/test_parrec_data.py @@ -1,5 +1,4 @@ -"""Test we can correctly import example PARREC files -""" +"""Test we can correctly import example PARREC files""" import unittest from glob import glob diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index 1422bb3351..c927b0fb9e 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -1,5 +1,4 @@ -"""Testing package info -""" 
+"""Testing package info""" import pytest diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index 27da6639c0..f1a4f0a909 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing processing module -""" +"""Testing processing module""" import logging from os.path import dirname diff --git a/nibabel/tests/test_rstutils.py b/nibabel/tests/test_rstutils.py index 847b7a4eee..eab1969857 100644 --- a/nibabel/tests/test_rstutils.py +++ b/nibabel/tests/test_rstutils.py @@ -1,5 +1,4 @@ -"""Test printable table -""" +"""Test printable table""" import numpy as np import pytest diff --git a/nibabel/tests/test_spaces.py b/nibabel/tests/test_spaces.py index dbfe533890..f5e467b2cc 100644 --- a/nibabel/tests/test_spaces.py +++ b/nibabel/tests/test_spaces.py @@ -1,5 +1,4 @@ -"""Tests for spaces module -""" +"""Tests for spaces module""" import numpy as np import numpy.linalg as npl diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index a5cab9e751..3d14dac18d 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing spatialimages -""" +"""Testing spatialimages""" from io import BytesIO diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 1ca1fb9b97..6b84725218 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -1,5 +1,4 @@ -"""Tests for warnings context managers -""" +"""Tests for warnings context managers""" import os import sys diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index f172d5c579..bcc81b5f5f 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -1,5 +1,4 @@ -"""Testing tripwire module -""" +"""Testing tripwire module""" import pytest diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index e18fb0210a..0eb906fee7 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -23,6 +23,7 @@ _field_recoders -> field_recoders """ + import logging from io import BytesIO, StringIO diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 49d69d2bf2..9d67f6acb7 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Contexts for *with* statement providing temporary directories""" + import os import tempfile from contextlib import contextmanager diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index fa45e73382..efe651fd93 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -1,4 +1,5 @@ """Class to raise error for missing modules or other misfortunes""" + from typing import Any diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 60ebd3a256..1e927544ba 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -3,6 +3,7 @@ Includes version of OrthoSlicer3D code originally written by our own Paul Ivanov. 
""" + import weakref import numpy as np diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 90e5e5ff35..cf2437e621 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utility functions for analyze-like formats""" + from __future__ import annotations import io @@ -1190,13 +1191,13 @@ def _ftype4scaled_finite( @ty.overload def finite_range( arr: npt.ArrayLike, check_nan: ty.Literal[False] = False -) -> tuple[Scalar, Scalar]: - ... # pragma: no cover +) -> tuple[Scalar, Scalar]: ... # pragma: no cover @ty.overload -def finite_range(arr: npt.ArrayLike, check_nan: ty.Literal[True]) -> tuple[Scalar, Scalar, bool]: - ... # pragma: no cover +def finite_range( + arr: npt.ArrayLike, check_nan: ty.Literal[True] +) -> tuple[Scalar, Scalar, bool]: ... # pragma: no cover def finite_range( diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index 6e236d7356..5ffe04bc78 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -109,6 +109,7 @@ nib.imageglobals.logger = logger """ + from __future__ import annotations import numpy as np diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index d3a7a08309..5049a76412 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Thin layer around xml.etree.ElementTree, to abstract nibabel xml support""" + from io import BytesIO from xml.etree.ElementTree import Element, SubElement, tostring # noqa from xml.parsers.expat import ParserCreate diff --git a/tox.ini b/tox.ini index 53860445aa..2e6a2449e6 100644 --- a/tox.ini +++ b/tox.ini @@ -142,7 +142,7 @@ deps = ruff>=0.3.0 skip_install = true commands = - ruff --diff nibabel + ruff check --diff nibabel ruff format --diff nibabel [testenv:style-fix] From a6f2a61f16308d7a3dcb968e60b2ffce1f7cbc53 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:14:21 +0100 Subject: [PATCH 538/702] MNT: get rid of .flake8/.pep8speaks.yml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are made obsolete by teh flake8 → ruff shift. --- .flake8 | 9 --------- .pep8speaks.yml | 12 ------------ 2 files changed, 21 deletions(-) delete mode 100644 .flake8 delete mode 100644 .pep8speaks.yml diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 9fe631ac81..0000000000 --- a/.flake8 +++ /dev/null @@ -1,9 +0,0 @@ -[flake8] -max-line-length = 100 -extend-ignore = E203,E266,E402,E731 -exclude = - *test* - *sphinx* - nibabel/externals/* -per-file-ignores = - */__init__.py: F401 diff --git a/.pep8speaks.yml b/.pep8speaks.yml deleted file mode 100644 index 0a0d8c619f..0000000000 --- a/.pep8speaks.yml +++ /dev/null @@ -1,12 +0,0 @@ -scanner: - diff_only: True # Only show errors caused by the patch - linter: flake8 - -message: # Customize the comment made by the bot - opened: # Messages when a new PR is submitted - header: "Hello @{name}, thank you for submitting the Pull Request!" - footer: "To test for issues locally, `pip install flake8` and then run `flake8 nibabel`." - updated: # Messages when new commits are added to the PR - header: "Hello @{name}, Thank you for updating!" - footer: "To test for issues locally, `pip install flake8` and then run `flake8 nibabel`." - no_errors: "Cheers! There are no style issues detected in this Pull Request. 
:beers: " From ac29ed26d403791f5868ac10056136a5ce66ddd7 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:25:11 +0100 Subject: [PATCH 539/702] MNT: ignore F401 in __init__.py Enforce that in pyproject.toml instead of __init__.py itself. --- nibabel/__init__.py | 2 -- nibabel/cifti2/__init__.py | 1 - nibabel/freesurfer/__init__.py | 2 -- nibabel/gifti/__init__.py | 2 -- nibabel/parrec.py | 2 +- nibabel/streamlines/__init__.py | 1 - nibabel/testing/__init__.py | 2 -- pyproject.toml | 3 +++ 8 files changed, 4 insertions(+), 11 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 1cb7abf53f..aa90540b8f 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -7,8 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# ruff: noqa: F401 - import os from .info import long_description as __doc__ diff --git a/nibabel/cifti2/__init__.py b/nibabel/cifti2/__init__.py index 4a5cad7675..9c6805f818 100644 --- a/nibabel/cifti2/__init__.py +++ b/nibabel/cifti2/__init__.py @@ -6,7 +6,6 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# ruff: noqa: F401 """CIFTI-2 format IO .. currentmodule:: nibabel.cifti2 diff --git a/nibabel/freesurfer/__init__.py b/nibabel/freesurfer/__init__.py index aa76eb2e89..1ab3859756 100644 --- a/nibabel/freesurfer/__init__.py +++ b/nibabel/freesurfer/__init__.py @@ -1,7 +1,5 @@ """Reading functions for freesurfer files""" -# ruff: noqa: F401 - from .io import ( read_annot, read_geometry, diff --git a/nibabel/gifti/__init__.py b/nibabel/gifti/__init__.py index d2a1e2da65..f54a1d2e54 100644 --- a/nibabel/gifti/__init__.py +++ b/nibabel/gifti/__init__.py @@ -16,8 +16,6 @@ gifti """ -# ruff: noqa: F401 - from .gifti import ( GiftiCoordSystem, GiftiDataArray, diff --git a/nibabel/parrec.py b/nibabel/parrec.py index d04f683d1d..8b3ffb34a2 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -7,7 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Disable line length checking for PAR fragments in module docstring -# flake8: noqa E501 +# noqa: E501 """Read images in PAR/REC format This is yet another MRI image format generated by Philips scanners. 
It is an diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 24a7e01469..dd00a1e842 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -1,5 +1,4 @@ """Multiformat-capable streamline format read / write interface""" -# ruff: noqa: F401 import os import warnings diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index a3e98e064b..d335c9a8c6 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -8,8 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for testing""" -# ruff: noqa: F401 - from __future__ import annotations import os diff --git a/pyproject.toml b/pyproject.toml index 515c35850b..5df6d01896 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -133,6 +133,9 @@ ignore = [ "ISC002", ] +[tool.ruff.lint.per-file-ignores] +"__init__.py" = ["F401"] + [tool.ruff.format] quote-style = "single" From d3352aef6991f1df8013d6bdc67aca56288dd346 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:27:28 +0100 Subject: [PATCH 540/702] =?UTF-8?q?MNT:=20ruff=200.3.0=20=E2=86=92=200.3.4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d35d287579..354bd3da1d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ repos: - id: check-merge-conflict - id: check-vcs-permalinks - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.0 + rev: v0.3.4 hooks: - id: ruff args: [--fix, --show-fix, --exit-non-zero-on-fix] From f57f5cbc4bb8d62861ee0c00931c134e4a66e0d7 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:29:51 +0100 Subject: [PATCH 541/702] Update doc/tools/apigen.py Co-authored-by: Chris Markiewicz --- doc/tools/apigen.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py index a1279a3e98..336c81d8d8 100644 --- a/doc/tools/apigen.py +++ b/doc/tools/apigen.py @@ -405,9 +405,7 @@ def discover_modules(self): def write_modules_api(self, modules, outdir): # upper-level modules - ulms = [ - '.'.join(m.split('.')[:2]) if m.count('.') >= 1 else m.split('.')[0] for m in modules - ] + ulms = ['.'.join(m.split('.')[:2]) for m in modules] from collections import OrderedDict From 1684a9dada92558b44ce7995f2050f5111f1ec33 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:30:36 +0100 Subject: [PATCH 542/702] Update nibabel/cifti2/tests/test_cifti2io_header.py Co-authored-by: Chris Markiewicz --- nibabel/cifti2/tests/test_cifti2io_header.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 92078a26d7..1c37cfe0e7 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -37,7 +37,7 @@ def test_space_separated_affine(): - _ = ci.Cifti2Image.from_filename(pjoin(NIBABEL_TEST_DATA, 'row_major.dconn.nii')) + ci.Cifti2Image.from_filename(pjoin(NIBABEL_TEST_DATA, 'row_major.dconn.nii')) def test_read_nifti2(): From a8ba819a26a15d6be2ea5c2bb6d6eaaaf89cef93 Mon 
Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:32:21 +0100 Subject: [PATCH 543/702] Update nibabel/gifti/tests/test_gifti.py Co-authored-by: Chris Markiewicz --- nibabel/gifti/tests/test_gifti.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index f27546afe7..88a2f31f8e 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -328,7 +328,7 @@ def test_metadata_list_interface(): assert len(md) == 0 # Extension adds multiple keys - with pytest.warns(DeprecationWarning) as _: + with deprecated_to('6.0'): foobar = GiftiNVPairs('foo', 'bar') mdlist.extend([nvpair, foobar]) assert len(mdlist) == 2 From d797ffe10431a4c62322d81495873bf01e277e72 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:32:37 +0100 Subject: [PATCH 544/702] Update nibabel/tests/test_pkg_info.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_pkg_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index c927b0fb9e..a39eac65b1 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -14,7 +14,7 @@ def test_pkg_info(): - nibabel.pkg_info.get_pkg_info - nibabel.pkg_info.pkg_commit_hash """ - _ = nib.get_info() + nib.get_info() def test_version(): From 59d6291ac98b1be6784b88291b9826220c1f7241 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:32:54 +0100 Subject: [PATCH 545/702] Update nibabel/gifti/tests/test_gifti.py Co-authored-by: Chris Markiewicz --- nibabel/gifti/tests/test_gifti.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 88a2f31f8e..a4cf5bb485 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -336,7 +336,7 @@ def test_metadata_list_interface(): assert md == {'key': 'value', 'foo': 'bar'} # Insertion updates list order, though we don't attempt to preserve it in the dict - with pytest.warns(DeprecationWarning) as _: + with deprecated_to('6.0'): lastone = GiftiNVPairs('last', 'one') mdlist.insert(1, lastone) assert len(mdlist) == 3 From 6daadc82d4634b7c9d31cc65f7de288ef67328ad Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:33:16 +0100 Subject: [PATCH 546/702] Update nibabel/tests/test_spatialimages.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_spatialimages.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 3d14dac18d..baf470090b 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -398,7 +398,10 @@ def test_slicer(self): img_klass = self.image_class in_data_template = np.arange(240, dtype=np.int16) base_affine = np.eye(4) - for dshape in ((4, 5, 6, 2), (8, 5, 6)): # Time series # Volume + for dshape in ( + (4, 5, 6, 2), # Time series + (8, 5, 6), # Volume + ): in_data = in_data_template.copy().reshape(dshape) img = img_klass(in_data, base_affine.copy()) From 32d0109c3a26fed6ac49d91613bef7193f324aac Mon Sep 17 00:00:00 2001 From: 
Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:33:29 +0100 Subject: [PATCH 547/702] Update nibabel/tests/test_testing.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_testing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 6b84725218..c9f91eb849 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -133,7 +133,7 @@ def test_warn_ignore(): with suppress_warnings(): warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) - with suppress_warnings() as _: + with suppress_warnings(): warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) assert n_warns == len(warnings.filters) From 9104d2fcfbd914494d5d4626c5a5bad0a675d6d0 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:33:38 +0100 Subject: [PATCH 548/702] Update tox.ini Co-authored-by: Chris Markiewicz --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 2e6a2449e6..d0b8653457 100644 --- a/tox.ini +++ b/tox.ini @@ -152,7 +152,7 @@ deps = ruff skip_install = true commands = - ruff --fix nibabel + ruff check --fix nibabel ruff format nibabel [testenv:spellcheck] From 02918edececbef01d9d536c4452cb17c0fd35955 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:34:08 +0100 Subject: [PATCH 549/702] Update nibabel/tests/test_testing.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_testing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index c9f91eb849..04ba813d8b 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -113,7 +113,7 @@ def test_warn_error(): with error_warnings(): with pytest.raises(UserWarning): warnings.warn('A test') - with error_warnings() as _: + with error_warnings(): with pytest.raises(UserWarning): warnings.warn('A test') assert n_warns == len(warnings.filters) From eeab46f658d9cf754ea9aeda5e3836553e6139d3 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:53:36 +0100 Subject: [PATCH 550/702] Update nibabel/gifti/tests/test_gifti.py Co-authored-by: Chris Markiewicz --- nibabel/gifti/tests/test_gifti.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index a4cf5bb485..6c867ad25b 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -360,7 +360,7 @@ def test_metadata_list_interface(): assert 'completelynew' not in md assert md == {'foo': 'bar', 'last': 'one'} # Check popping from the end (last one inserted before foobar) - _ = mdlist.pop() + mdlist.pop() assert len(mdlist) == 1 assert len(md) == 1 assert md == {'last': 'one'} From 46c84879dc2952b69508ad489927437ad1e471ab Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:53:51 +0100 Subject: [PATCH 551/702] Update nibabel/gifti/tests/test_gifti.py Co-authored-by: Chris Markiewicz --- nibabel/gifti/tests/test_gifti.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 6c867ad25b..1cead0d928 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -366,7 +366,7 @@ def test_metadata_list_interface(): assert md == {'last': 'one'} # And let's remove an old pair with a new object - with pytest.warns(DeprecationWarning) as _: + with deprecated_to('6.0'): lastoneagain = GiftiNVPairs('last', 'one') mdlist.remove(lastoneagain) assert len(mdlist) == 0 From dec3a2dba421db615aaa9c85cd53d002b4af5644 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:54:03 +0100 Subject: [PATCH 552/702] Update nibabel/gifti/tests/test_parse_gifti_fast.py Co-authored-by: Chris Markiewicz --- nibabel/gifti/tests/test_parse_gifti_fast.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index 17258fbd30..c562b90480 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -447,7 +447,7 @@ def test_external_file_failure_cases(): shutil.copy(DATA_FILE7, '.') filename = pjoin(tmpdir, basename(DATA_FILE7)) with pytest.raises(GiftiParseError): - _ = load(filename) + load(filename) # load from in-memory xml string (parser requires it as bytes) with open(DATA_FILE7, 'rb') as f: xmldata = f.read() From d8d3a4489c67b84e61b0d2aa190bb2b31b5d3a1e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:54:17 +0100 Subject: [PATCH 553/702] Update nibabel/gifti/tests/test_parse_gifti_fast.py Co-authored-by: Chris Markiewicz --- nibabel/gifti/tests/test_parse_gifti_fast.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index c562b90480..8cb7c96794 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -453,7 +453,7 @@ def test_external_file_failure_cases(): xmldata = f.read() parser = GiftiImageParser() with pytest.raises(GiftiParseError): - _ = parser.parse(xmldata) + parser.parse(xmldata) def test_load_compressed(): From de9f2b0a2a246b60cb6bcae8780df000a70cd59d Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:54:29 +0100 Subject: [PATCH 554/702] Update nibabel/nicom/tests/test_dicomwrappers.py Co-authored-by: Chris Markiewicz --- nibabel/nicom/tests/test_dicomwrappers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index e96607df9e..d14c35dcdb 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -630,7 +630,7 @@ def test_image_position(self): def test_affine(self): # Make sure we find orientation/position/spacing info dw = didw.wrapper_from_file(DATA_FILE_4D) - _ = dw.affine + dw.affine @dicom_test @pytest.mark.xfail(reason='Not packaged in install', raises=FileNotFoundError) From b7a5f5aa644ca645499a88438360d814ef377769 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:54:45 +0100 Subject: [PATCH 555/702] Update 
nibabel/streamlines/tests/test_tck.py Co-authored-by: Chris Markiewicz --- nibabel/streamlines/tests/test_tck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index 6b4c163ed6..083ab8e6e9 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -137,7 +137,7 @@ def test_load_file_with_wrong_information(self): # Simulate a TCK file with no `file` field. new_tck_file = tck_file.replace(b'\nfile: . 67', b'') - with pytest.warns(HeaderWarning, match="Missing 'file'") as _: + with pytest.warns(HeaderWarning, match="Missing 'file'"): tck = TckFile.load(BytesIO(new_tck_file)) assert_array_equal(tck.header['file'], '. 56') From a621d41987ae64f964fe71b800a59771981f4130 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:55:04 +0100 Subject: [PATCH 556/702] Update nibabel/streamlines/tests/test_trk.py Co-authored-by: Chris Markiewicz --- nibabel/streamlines/tests/test_trk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py index 749bf3ed30..4cb6032c25 100644 --- a/nibabel/streamlines/tests/test_trk.py +++ b/nibabel/streamlines/tests/test_trk.py @@ -149,7 +149,7 @@ def test_load_file_with_wrong_information(self): # Simulate a TRK where `vox_to_ras` is invalid. trk_struct, trk_bytes = self.trk_with_bytes() trk_struct[Field.VOXEL_TO_RASMM] = np.diag([0, 0, 0, 1]) - with clear_and_catch_warnings(record=True, modules=[trk_module]) as _: + with clear_and_catch_warnings(modules=[trk_module]): with pytest.raises(HeaderError): TrkFile.load(BytesIO(trk_bytes)) From 37ff0ebe9dd035f99d28ded561928f58315fdb68 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:55:31 +0100 Subject: [PATCH 557/702] Update nibabel/tests/test_affines.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_affines.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 1d7ef1e6bf..d4ea11821b 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -225,7 +225,6 @@ def test_rescale_affine(): orig_shape = rng.randint(low=20, high=512, size=(3,)) orig_aff = np.eye(4) orig_aff[:3, :] = rng.normal(size=(3, 4)) - orig_zooms = voxel_sizes(orig_aff) # noqa: F841 orig_axcodes = aff2axcodes(orig_aff) orig_centroid = apply_affine(orig_aff, (orig_shape - 1) // 2) From 36d36fbddcdb1c0cb4f3fc503452291ba90971a6 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:55:51 +0100 Subject: [PATCH 558/702] Update nibabel/tests/test_arraywriters.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_arraywriters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 2fc9c32358..25040e5eed 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -276,7 +276,7 @@ def test_slope_inter_castable(): for out_dtt in NUMERIC_TYPES: for klass in (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter): arr = np.zeros((5,), dtype=in_dtt) - _ = klass(arr, out_dtt) # no error + klass(arr, out_dtt) # no error # Test special case of none finite # This raises error 
for ArrayWriter, but not for the others arr = np.array([np.inf, np.nan, -np.inf]) From 0922369b170a38215b9cc6d0d2ce69d668f579c1 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:56:13 +0100 Subject: [PATCH 559/702] Update nibabel/tests/test_arraywriters.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_arraywriters.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 25040e5eed..4a853ecf5e 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -285,8 +285,8 @@ def test_slope_inter_castable(): in_arr = arr.astype(in_dtt) with pytest.raises(WriterError): ArrayWriter(in_arr, out_dtt) - _ = SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error - _ = SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error + SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error + SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error for in_dtt, out_dtt, arr, slope_only, slope_inter, neither in ( (np.float32, np.float32, 1, True, True, True), (np.float64, np.float32, 1, True, True, True), From 50177cc9e521716234510dab8c4bd48892c40b6a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:56:53 +0100 Subject: [PATCH 560/702] Update nibabel/tests/test_image_load_save.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_image_load_save.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 934698d9e6..0e5fd57d08 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -131,7 +131,7 @@ def test_save_load(): affine[:3, 3] = [3, 2, 1] img = ni1.Nifti1Image(data, affine) img.set_data_dtype(npt) - with InTemporaryDirectory() as _: + with InTemporaryDirectory(): nifn = 'an_image.nii' sifn = 'another_image.img' ni1.save(img, nifn) From 489b9d29795d33c46a4b2e0e079a22bb4a6e9a1e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:57:11 +0100 Subject: [PATCH 561/702] Update nibabel/tests/test_imageclasses.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_imageclasses.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 7b3add6cd0..90ef966d2d 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -6,7 +6,6 @@ import numpy as np import nibabel as nib -from nibabel import imageclasses # noqa: F401 from nibabel.analyze import AnalyzeImage from nibabel.imageclasses import spatial_axes_first from nibabel.nifti1 import Nifti1Image From 72c0ebf96f2081eee22bab5b167e12306a4693a3 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:57:41 +0100 Subject: [PATCH 562/702] Update nibabel/tests/test_minc2.py Co-authored-by: Chris Markiewicz --- nibabel/tests/test_minc2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index 7ab29edfde..4c2973a728 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -129,5 +129,5 @@ def test_bad_diminfo(): # File has a bad spacing field 
'xspace' when it should be # `irregular`, `regular__` or absent (default to regular__). # We interpret an invalid spacing as absent, but warn. - with pytest.warns(UserWarning) as _: + with pytest.warns(UserWarning): Minc2Image.from_filename(fname) From 223fdc072ee22034c5388a824e350aafb5c8914a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:05:08 +0100 Subject: [PATCH 563/702] Put back argument, used by @pytest.fixture --- nibabel/tests/test_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index cca8d0ba81..5697752ea4 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -27,7 +27,7 @@ @pytest.fixture -def with_nimd_env(request): +def with_nimd_env(request, with_environment): # noqa: F811 DATA_FUNCS = {} DATA_FUNCS['home_dir_func'] = nibd.get_nipy_user_dir DATA_FUNCS['sys_dir_func'] = nibd.get_nipy_system_dir From 19e4a56f8e4d4f6e6e5460f08389c2ced5e44c16 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:08:24 +0100 Subject: [PATCH 564/702] MNT: ignore F401 in doc/source/conf.py Enforce that in pyproject.toml instead of conf.py itself. --- doc/source/conf.py | 4 ++-- pyproject.toml | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index e8999b7d2b..175c6340bd 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -30,11 +30,11 @@ # Check for external Sphinx extensions we depend on try: - import numpydoc # noqa: F401 + import numpydoc except ImportError: raise RuntimeError('Need to install "numpydoc" package for doc build') try: - import texext # noqa: F401 + import texext except ImportError: raise RuntimeError('Need to install "texext" package for doc build') diff --git a/pyproject.toml b/pyproject.toml index 5df6d01896..bf7b099031 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,6 +135,7 @@ ignore = [ [tool.ruff.lint.per-file-ignores] "__init__.py" = ["F401"] +"doc/source/conf.py" = ["F401"] [tool.ruff.format] quote-style = "single" From 066431d9bf5b6843514528ff5a6d81fbef4f8e9d Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:10:55 +0100 Subject: [PATCH 565/702] MNT: Get rid of last `coding: utf-8` --- doc/source/conf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 175c6340bd..f4ab16d2db 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## From 418188e34b18fcf5231b6a3c2a8e947608ea5aa3 Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Fri, 29 Mar 2024 17:13:39 +0000 Subject: [PATCH 566/702] DOC: fix typos for key kay -> key --- nibabel/analyze.py | 2 +- nibabel/dataobj_images.py | 2 +- nibabel/freesurfer/mghformat.py | 2 +- nibabel/spm99analyze.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 20fdac055a..bd3eaa8897 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -929,7 +929,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Parameters ---------- file_map : dict - Mapping with (kay, 
value) pairs of (``file_type``, FileHolder + Mapping with (key, value) pairs of (``file_type``, FileHolder instance giving file-likes for each file needed for this image type. mmap : {True, False, 'c', 'r'}, optional, keyword only diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index eaf341271e..019d6b9551 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -437,7 +437,7 @@ def from_file_map( Parameters ---------- file_map : dict - Mapping with (kay, value) pairs of (``file_type``, FileHolder + Mapping with (key, value) pairs of (``file_type``, FileHolder instance giving file-likes for each file needed for this image type. mmap : {True, False, 'c', 'r'}, optional, keyword only diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 5dd2660342..4c4b854a3e 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -495,7 +495,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Parameters ---------- file_map : dict - Mapping with (kay, value) pairs of (``file_type``, FileHolder + Mapping with (key, value) pairs of (``file_type``, FileHolder instance giving file-likes for each file needed for this image type. mmap : {True, False, 'c', 'r'}, optional, keyword only diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 3465c57190..395a299c1a 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -240,7 +240,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Parameters ---------- file_map : dict - Mapping with (kay, value) pairs of (``file_type``, FileHolder + Mapping with (key, value) pairs of (``file_type``, FileHolder instance giving file-likes for each file needed for this image type. mmap : {True, False, 'c', 'r'}, optional, keyword only From 9fa116b3ddfb8065421fd6a5a9320bf7bd1646e3 Mon Sep 17 00:00:00 2001 From: Sandro Date: Mon, 1 Apr 2024 15:59:57 +0200 Subject: [PATCH 567/702] Python 3.13: Account for dedented docstrings - Dedent docstrings in Python 3.13+ - Fix #1311 - Ref: https://github.com/python/cpython/issues/81283 --- nibabel/deprecator.py | 15 +++++++++++++++ nibabel/tests/test_deprecator.py | 15 ++++++++++----- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 779fdb462d..a80fa25692 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -3,8 +3,10 @@ import functools import re +import sys import typing as ty import warnings +from textwrap import dedent if ty.TYPE_CHECKING: # pragma: no cover T = ty.TypeVar('T') @@ -12,6 +14,15 @@ _LEADING_WHITE = re.compile(r'^(\s*)') + +def _dedent_docstring(docstring): + """Compatibility with Python 3.13+. + + xref: https://github.com/python/cpython/issues/81283 + """ + return '\n'.join([dedent(line) for line in docstring.split('\n')]) + + TESTSETUP = """ .. 
testsetup:: @@ -32,6 +43,10 @@ """ +if sys.version_info >= (3, 13): + TESTSETUP = _dedent_docstring(TESTSETUP) + TESTCLEANUP = _dedent_docstring(TESTCLEANUP) + class ExpiredDeprecationError(RuntimeError): """Error for expired deprecation diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 833908af94..4303ff6737 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -14,6 +14,7 @@ Deprecator, ExpiredDeprecationError, _add_dep_doc, + _dedent_docstring, _ensure_cr, ) @@ -21,6 +22,14 @@ _OWN_MODULE = sys.modules[__name__] +func_docstring = ( + f'A docstring\n \n foo\n \n{indent(TESTSETUP, " ", lambda x: True)}' + f' Some text\n{indent(TESTCLEANUP, " ", lambda x: True)}' +) + +if sys.version_info >= (3, 13): + func_docstring = _dedent_docstring(func_docstring) + def test__ensure_cr(): # Make sure text ends with carriage return @@ -92,11 +101,7 @@ def test_dep_func(self): with pytest.deprecated_call() as w: assert func(1, 2) is None assert len(w) == 1 - assert ( - func.__doc__ - == f'A docstring\n \n foo\n \n{indent(TESTSETUP, " ", lambda x: True)}' - f' Some text\n{indent(TESTCLEANUP, " ", lambda x: True)}' - ) + assert func.__doc__ == func_docstring # Try some since and until versions func = dec('foo', '1.1')(func_no_doc) From f262e75361c4a737e4f6c534c2882b07b0d78fd7 Mon Sep 17 00:00:00 2001 From: Sandro Date: Tue, 2 Apr 2024 12:13:49 +0200 Subject: [PATCH 568/702] Update instructions for building docs The top level `Makefile` is outdated. This circumvents its use. --- doc/README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/README.rst b/doc/README.rst index a19a3c1261..b2afd8ce16 100644 --- a/doc/README.rst +++ b/doc/README.rst @@ -6,4 +6,4 @@ To build the documentation, change to the root directory (containing ``setup.py``) and run:: pip install -r doc-requirements.txt - make html + make -C doc html From 9d1201396a6cf9714b96ed501408e048ae422754 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 14 Apr 2024 10:08:57 -0400 Subject: [PATCH 569/702] Update doc/README.rst --- doc/README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/README.rst b/doc/README.rst index b2afd8ce16..d5fd9765e6 100644 --- a/doc/README.rst +++ b/doc/README.rst @@ -3,7 +3,7 @@ Nibabel documentation ##################### To build the documentation, change to the root directory (containing -``setup.py``) and run:: +``pyproject.toml``) and run:: pip install -r doc-requirements.txt make -C doc html From 568e37fb1e55a78d17978c12a269aa6e309e0e35 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 28 Apr 2024 21:17:16 -0400 Subject: [PATCH 570/702] TOX: Update dependencies for arm64 --- tox.ini | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tox.ini b/tox.ini index d0b8653457..2826623eac 100644 --- a/tox.ini +++ b/tox.ini @@ -43,14 +43,14 @@ DEPENDS = ARCH = x64: x64 x86: x86 + arm64: arm64 [testenv] description = Pytest with coverage labels = test install_command = python -I -m pip install -v \ - x64: --only-binary numpy,scipy,h5py,pillow \ - x86: --only-binary numpy,scipy,h5py,pillow,matplotlib \ + --only-binary numpy,scipy,h5py,pillow,matplotlib \ pre,dev: --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ {opts} {packages} pip_pre = @@ -91,11 +91,11 @@ deps = pre: numpy <2.0.dev0 dev: numpy >=2.0.dev0 # Scipy stopped producing win32 wheels at py310 - py3{8,9}-full-x86,x64: scipy >=1.6 + py3{8,9}-full-x86,x64,arm64: scipy 
>=1.6 # Matplotlib depends on scipy, so cannot be built for py310 on x86 - py3{8,9}-full-x86,x64: matplotlib >=3.4 + py3{8,9}-full-x86,x64,arm64: matplotlib >=3.4 # h5py stopped producing win32 wheels at py39 - py38-full-x86,x64: h5py >=2.10 + py38-full-x86,x64,arm64: h5py >=2.10 full,pre,dev: pillow >=8.1 full,pre,dev: indexed_gzip >=1.4 full,pre,dev: pyzstd >=0.14.3 From 8f2c039f2e3d9ccdb3af1a526e3ff1985819dabe Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 28 Apr 2024 21:27:04 -0400 Subject: [PATCH 571/702] CI: Add/distinguish macos-13-x64 and macos-14-arm64 runs --- .github/workflows/test.yml | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a6eb39734f..3b79c87105 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -112,9 +112,9 @@ jobs: strategy: fail-fast: false matrix: - os: ['ubuntu-latest', 'windows-latest', 'macos-latest'] + os: ['ubuntu-latest', 'windows-latest', 'macos-13', 'macos-latest'] python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] - architecture: ['x64', 'x86'] + architecture: ['x64', 'x86', 'arm64'] dependencies: ['full', 'pre'] include: # Basic dependencies only @@ -130,12 +130,28 @@ jobs: python-version: '3.12' dependencies: 'dev' exclude: + # x86 for Windows + Python<3.12 - os: ubuntu-latest architecture: x86 + - os: macos-13 + architecture: x86 - os: macos-latest architecture: x86 - python-version: '3.12' architecture: x86 + # arm64 is available for macos-14+ + - os: ubuntu-latest + architecture: arm64 + - os: windows-latest + architecture: arm64 + - os: macos-13 + architecture: arm64 + # x64 is not available for macos-14+ + - os: macos-latest + architecture: x64 + # Drop pre tests for macos-13 + - os: macos-13 + dependencies: pre env: DEPENDS: ${{ matrix.dependencies }} From 08dd20d4d66984f9704cee9c49c7275f16e5e86a Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 28 Apr 2024 21:46:02 -0400 Subject: [PATCH 572/702] TOX: Print durations to see slow tests --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 2826623eac..b9ac9557cb 100644 --- a/tox.ini +++ b/tox.ini @@ -106,6 +106,7 @@ commands = pytest --doctest-modules --doctest-plus \ --cov nibabel --cov-report xml:cov.xml \ --junitxml test-results.xml \ + --durations=20 --durations-min=1.0 \ --pyargs nibabel {posargs:-n auto} [testenv:install] From feda198d53028db570e32509761088eedf98231d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 28 Apr 2024 21:53:08 -0400 Subject: [PATCH 573/702] CI: Run pre-release tests only on SPEC-0 supported Python --- .github/workflows/test.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3b79c87105..2b3d9f2494 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -152,6 +152,11 @@ jobs: # Drop pre tests for macos-13 - os: macos-13 dependencies: pre + # Drop pre tests for SPEC-0-unsupported Python versions + - python-version: '3.8' + dependencies: pre + - python-version: '3.9' + dependencies: pre env: DEPENDS: ${{ matrix.dependencies }} From 47ea032b541fccb512ecb44f9ddb9420cfacdd0a Mon Sep 17 00:00:00 2001 From: Guillaume Becq Date: Thu, 25 Apr 2024 16:33:18 +0200 Subject: [PATCH 574/702] Update OrthoSlicer3D._set_position in viewers.py wrong indices to original data leading to weird selection of voxels for weird affine transforms and weird volumes this bug is also related to strange behavior with 
special acquisitions, for example small-animal (rodent) studies, where the origin (0,0,0) ends up at the wrong location in image.orthoview() --- nibabel/viewers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 1e927544ba..e66a34149a 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -399,7 +399,8 @@ def _set_position(self, x, y, z, notify=True): # deal with slicing appropriately self._position[:3] = [x, y, z] idxs = np.dot(self._inv_affine, self._position)[:3] - for ii, (size, idx) in enumerate(zip(self._sizes, idxs)): + idxs_new_order = idxs[self._order] + for ii, (size, idx) in enumerate(zip(self._sizes, idxs_new_order)): self._data_idx[ii] = max(min(int(round(idx)), size - 1), 0) for ii in range(3): # sagittal: get to S/A From ab64f37c2d0cd3ab1160d99cfe4ba27874b69cc2 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 5 May 2024 13:12:32 +0200 Subject: [PATCH 575/702] STY: Apply ruff/flake8-implicit-str-concat rule ISC001 ISC001 Implicitly concatenated string literals on one line This rule is currently disabled because it conflicts with the formatter: https://github.com/astral-sh/ruff/issues/8272 --- nibabel/streamlines/__init__.py | 2 +- nibabel/volumeutils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index dd00a1e842..46b403b424 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -131,7 +131,7 @@ def save(tractogram, filename, **kwargs): warnings.warn(msg, ExtensionWarning) if kwargs: - msg = "A 'TractogramFile' object was provided, no need for" ' keyword arguments.' + msg = "A 'TractogramFile' object was provided, no need for keyword arguments."
raise ValueError(msg) tractogram_file.save(filename) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index cf2437e621..379d654a35 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -441,7 +441,7 @@ def array_from_file( True """ if mmap not in (True, False, 'c', 'r', 'r+'): - raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") + raise ValueError("mmap value should be one of True, False, 'c', 'r', 'r+'") in_dtype = np.dtype(in_dtype) # Get file-like object from Opener instance infile = getattr(infile, 'fobj', infile) From 1bd8c262c8ac1adb17eeb313456232488f721d83 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 21 May 2024 18:16:51 -0400 Subject: [PATCH 576/702] MNT: Fix ruff arg in pre-commit config --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 354bd3da1d..b348393a45 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: rev: v0.3.4 hooks: - id: ruff - args: [--fix, --show-fix, --exit-non-zero-on-fix] + args: [--fix, --show-fixes, --exit-non-zero-on-fix] exclude: = ["doc", "tools"] - id: ruff-format exclude: = ["doc", "tools"] From d571b92588447871fb8d869642d8053db44f1b74 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 21 May 2024 18:16:58 -0400 Subject: [PATCH 577/702] ENH: Add Nifti2 capabilities to nib-nifti-dx --- nibabel/cmdline/nifti_dx.py | 27 +++++++++++++++++++-------- nibabel/tests/test_scripts.py | 2 +- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py index 103bbf2640..eb917a04b8 100644 --- a/nibabel/cmdline/nifti_dx.py +++ b/nibabel/cmdline/nifti_dx.py @@ -9,8 +9,7 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Print nifti diagnostics for header files""" -import sys -from optparse import OptionParser +from argparse import ArgumentParser import nibabel as nib @@ -21,15 +20,27 @@ def main(args=None): """Go go team""" - parser = OptionParser( - usage=f'{sys.argv[0]} [FILE ...]\n\n' + __doc__, version='%prog ' + nib.__version__ + parser = ArgumentParser(description=__doc__) + parser.add_argument('--version', action='version', version=f'%(prog)s {nib.__version__}') + parser.add_argument( + '-1', + '--nifti1', + dest='header_class', + action='store_const', + const=nib.Nifti1Header, + default=nib.Nifti1Header, ) - (opts, files) = parser.parse_args(args=args) + parser.add_argument( + '-2', '--nifti2', dest='header_class', action='store_const', const=nib.Nifti2Header + ) + parser.add_argument('files', nargs='*', metavar='FILE', help='Nifti file names') + + args = parser.parse_args(args=args) - for fname in files: + for fname in args.files: with nib.openers.ImageOpener(fname) as fobj: - hdr = fobj.read(nib.nifti1.header_dtype.itemsize) - result = nib.Nifti1Header.diagnose_binaryblock(hdr) + hdr = fobj.read(args.header_class.template_dtype.itemsize) + result = args.header_class.diagnose_binaryblock(hdr) if len(result): print(f'Picky header check output for "{fname}"\n') print(result + '\n') diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 455a994ae1..d97c99d051 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -202,7 +202,7 @@ def test_help(): code, stdout, stderr = run_command([cmd, '--help']) assert code == 0 assert_re_in(f'.*{cmd}', stdout) - assert_re_in('.*Usage', stdout) + assert_re_in('.*[uU]sage', 
stdout) # Some third party modules might like to announce some Deprecation # etc warnings, see e.g. https://travis-ci.org/nipy/nibabel/jobs/370353602 if 'warning' not in stderr.lower(): From 82c8588528d5a06fd0dfc99e3cbb83d5cc299e2b Mon Sep 17 00:00:00 2001 From: Sandro Date: Wed, 29 May 2024 00:20:34 +0200 Subject: [PATCH 578/702] Replace deprecated setup() and teardown() Those were compatibility functions for porting from nose. They are now deprecated and have been removed from pytest. This will make all tests compatible with pytest 8.x. --- nibabel/streamlines/tests/test_streamlines.py | 2 +- nibabel/tests/test_deprecated.py | 4 ++-- nibabel/tests/test_dft.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index f0bd9c7c49..53a43c393a 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -20,7 +20,7 @@ DATA = {} -def setup(): +def setup_module(): global DATA DATA['empty_filenames'] = [pjoin(data_path, 'empty' + ext) for ext in FORMATS.keys()] DATA['simple_filenames'] = [pjoin(data_path, 'simple' + ext) for ext in FORMATS.keys()] diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index f1c3d517c9..01636632e4 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -14,12 +14,12 @@ from nibabel.tests.test_deprecator import TestDeprecatorFunc as _TestDF -def setup(): +def setup_module(): # Hack nibabel version string pkg_info.cmp_pkg_version.__defaults__ = ('2.0',) -def teardown(): +def teardown_module(): # Hack nibabel version string back again pkg_info.cmp_pkg_version.__defaults__ = (pkg_info.__version__,) diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index 654af98279..6c6695b16e 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -26,7 +26,7 @@ data_dir = pjoin(dirname(__file__), 'data') -def setUpModule(): +def setup_module(): if os.name == 'nt': raise unittest.SkipTest('FUSE not available for windows, skipping dft tests') if not have_dicom: From 95e7c156e0d115c222f4a7e9545f27edd8f6dced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Wed, 26 Jun 2024 19:43:01 -0400 Subject: [PATCH 579/702] RF: Prefer using `getlocale()` instead of `getdefaultlocale()` Prefer using `getlocale()` instead of `getdefaultlocale()`. Fixes: ``` /home/runner/work/nibabel/nibabel/nibabel/cmdline/dicomfs.py:40: DeprecationWarning: 'locale.getdefaultlocale' is deprecated and slated for removal in Python 3.15. Use setlocale(), getencoding() and getlocale() instead.
encoding = locale.getdefaultlocale()[1] ``` raised for example at: https://github.com/nipy/nibabel/actions/runs/9637811213/job/26577586721#step:7:164 --- nibabel/cmdline/dicomfs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index 66ffb8adea..552bb09319 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -37,7 +37,7 @@ class dummy_fuse: import nibabel as nib import nibabel.dft as dft -encoding = locale.getdefaultlocale()[1] +encoding = locale.getlocale()[1] fuse.fuse_python_api = (0, 2) From 17809b067ddd22de438b9b49b116c2c496b7a752 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Wed, 26 Jun 2024 19:51:43 -0400 Subject: [PATCH 580/702] RF: Prefer using `np.vstack` instead of `np.row_stack` Prefer using `np.vstack` instead of `np.row_stack`. Fixes: ``` nibabel/ecat.py: 3 warnings /home/runner/work/nibabel/nibabel/nibabel/ecat.py:393: DeprecationWarning: `row_stack` alias is deprecated. Use `np.vstack` directly. return np.row_stack(mlists) ``` and similar warnings. Raised for example at: https://github.com/nipy/nibabel/actions/runs/9637811213/job/26577586721#step:7:186 Documentation: https://numpy.org/doc/1.26/reference/generated/numpy.row_stack.html This helps preparing for full Numpy 2.0 compatibility. Documentation: https://numpy.org/doc/stable/numpy_2_0_migration_guide.html#main-namespace --- nibabel/brikhead.py | 2 +- nibabel/ecat.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 3a3cfd0871..da8692efd3 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -391,7 +391,7 @@ def get_affine(self): # AFNI default is RAI- == LPS+ == DICOM order. We need to flip RA sign # to align with nibabel RAS+ system affine = np.asarray(self.info['IJK_TO_DICOM_REAL']).reshape(3, 4) - affine = np.row_stack((affine * [[-1], [-1], [1]], [0, 0, 0, 1])) + affine = np.vstack((affine * [[-1], [-1], [1]], [0, 0, 0, 1])) return affine def get_data_scaling(self): diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 03a4c72b98..34ff06323c 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -390,7 +390,7 @@ def read_mlist(fileobj, endianness): mlist_index += n_rows if mlist_block_no <= 2: # should block_no in (1, 2) be an error? break - return np.row_stack(mlists) + return np.vstack(mlists) def get_frame_order(mlist): From 94e3e83752c58b1ae20a50e97c5ea9eed21abacf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Wed, 26 Jun 2024 20:17:19 -0400 Subject: [PATCH 581/702] RF: Fix `ast` library type and attribute deprecation warnings Fix `ast` library type and attribute deprecation warnings. 
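For illustration (a sketch, not code from this patch; the parsed assignment is hypothetical), the deprecated `ast.Num`/`.n` pattern maps onto `ast.Constant`/`.value` like so:

```
import ast

# RHS of the hypothetical assignment "x = 3"
rhs = ast.parse('x = 3').body[0].value

# Before (deprecated, removal slated for Python 3.14):
#     isinstance(rhs, ast.Num) and rhs.n
# After (the parser has emitted ast.Constant since Python 3.8):
if isinstance(rhs, ast.Constant):
    print(rhs.value)  # -> 3
```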
Fixes: ``` /home/runner/work/nibabel/nibabel/nibabel/nicom/ascconv.py:177: DeprecationWarning: ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead if isinstance(value, ast.Num): /home/runner/work/nibabel/nibabel/nibabel/nicom/ascconv.py:179: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead if isinstance(value, ast.Str): /home/runner/work/nibabel/nibabel/nibabel/nicom/ascconv.py:180: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead return value.s /home/runner/work/nibabel/nibabel/nibabel/nicom/ascconv.py:94: DeprecationWarning: Attribute n is deprecated and will be removed in Python 3.14; use value instead index = target.slice.n /home/runner/work/nibabel/nibabel/nibabel/nicom/ascconv.py:182: DeprecationWarning: Attribute n is deprecated and will be removed in Python 3.14; use value instead return -value.operand.n ``` raised for example in: https://github.com/nipy/nibabel/actions/runs/9637811213/job/26577586721#step:7:207 Documentation: https://docs.python.org/3/library/ast.html --- nibabel/nicom/ascconv.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index 0966de2a96..8ec72fb3ec 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -91,7 +91,7 @@ def assign2atoms(assign_ast, default_class=int): prev_target_type = OrderedDict elif isinstance(target, ast.Subscript): if isinstance(target.slice, ast.Constant): # PY39 - index = target.slice.n + index = target.slice.value else: # PY38 index = target.slice.value.n atoms.append(Atom(target, prev_target_type, index)) @@ -174,12 +174,10 @@ def obj_from_atoms(atoms, namespace): def _get_value(assign): value = assign.value - if isinstance(value, ast.Num): - return value.n - if isinstance(value, ast.Str): - return value.s + if isinstance(value, ast.Constant): + return value.value if isinstance(value, ast.UnaryOp) and isinstance(value.op, ast.USub): - return -value.operand.n + return -value.operand.value raise AscconvParseError(f'Unexpected RHS of assignment: {value}') From d1235a6ef5ea31c5be784a6b5448b9e0d598014f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Wed, 26 Jun 2024 20:04:02 -0400 Subject: [PATCH 582/702] RF: Remove unnecessary call to `asbytes` for `b`-prepended strings Remove unnecessary call to `asbytes` for `b`-prepended strings: strings prepended with `b` are already treated as bytes literals: - `TckFile.MAGIC_NUMBER` is b'mrtrix tracks' - `TrkFile.MAGIC_NUMBER` is b'TRACK' Documentation: https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals Fixes: ``` /home/runner/work/nibabel/nibabel/nibabel/streamlines/tests/test_streamlines.py:9: DeprecationWarning: `np.compat`, which was used during the Python 2 to 3 transition, is deprecated since 1.26.0, and will be removed from numpy.compat.py3k import asbytes ``` raised for example at: https://github.com/nipy/nibabel/actions/runs/9637811213/job/26577586721#step:7:178 --- nibabel/streamlines/tests/test_streamlines.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index 53a43c393a..857e64fec9 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -6,7 +6,6 @@ import numpy as np import pytest -from numpy.compat.py3k import 
asbytes import nibabel as nib from nibabel.testing import clear_and_catch_warnings, data_path, error_warnings @@ -95,7 +94,7 @@ def test_is_supported_detect_format(tmp_path): # Valid file without extension for tfile_cls in FORMATS.values(): f = BytesIO() - f.write(asbytes(tfile_cls.MAGIC_NUMBER)) + f.write(tfile_cls.MAGIC_NUMBER) f.seek(0, os.SEEK_SET) assert nib.streamlines.is_supported(f) assert nib.streamlines.detect_format(f) is tfile_cls @@ -104,7 +103,7 @@ def test_is_supported_detect_format(tmp_path): for tfile_cls in FORMATS.values(): fpath = tmp_path / 'test.txt' with open(fpath, 'w+b') as f: - f.write(asbytes(tfile_cls.MAGIC_NUMBER)) + f.write(tfile_cls.MAGIC_NUMBER) f.seek(0, os.SEEK_SET) assert nib.streamlines.is_supported(f) assert nib.streamlines.detect_format(f) is tfile_cls From 447ef576316d814138f7af33cee97dc6e23e5337 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Wed, 26 Jun 2024 20:27:22 -0400 Subject: [PATCH 583/702] RF: Fix for `abc` library `Traversable` class module Fix for `abc` library `Traversable` class module: import from `importlib.resources.abc`. Fixes: ``` /home/runner/work/nibabel/nibabel/nibabel/testing/__init__.py:30: DeprecationWarning: 'importlib.abc.Traversable' is deprecated and slated for removal in Python 3.14 from importlib.abc import Traversable ``` raised for example at: https://github.com/nipy/nibabel/actions/runs/9637811213/job/26577586721#step:7:157 Documentation: https://docs.python.org/3/library/importlib.resources.abc.html#importlib.resources.abc.Traversable --- nibabel/testing/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index d335c9a8c6..0ba82d6cb0 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -27,8 +27,8 @@ from .np_features import memmap_after_ufunc try: - from importlib.abc import Traversable from importlib.resources import as_file, files + from importlib.resources.abc import Traversable except ImportError: # PY38 from importlib_resources import as_file, files from importlib_resources.abc import Traversable From 7caef99068f88bafbf25f61b0e75b10770e28df4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 27 Jun 2024 16:27:58 +0900 Subject: [PATCH 584/702] MNT: Update importlib_resources requirement to match 3.12 usage --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index bf7b099031..4df5886d78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,7 @@ requires-python = ">=3.8" dependencies = [ "numpy >=1.20", "packaging >=17", - "importlib_resources >=1.3; python_version < '3.9'", + "importlib_resources >=5.12; python_version < '3.12'", ] classifiers = [ "Development Status :: 5 - Production/Stable", From 3a7cebaca9729b0b03c8dd4ba01ff1a62d39cb26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Thu, 27 Jun 2024 18:35:51 -0400 Subject: [PATCH 585/702] RF: Use `numpy.lib.scimath` to demonstrate warning context manager Use `numpy.lib.scimath` instead of deprecated `numpy.core.fromnumeric` in `clear_and_catch_warnings` context manager doctests. Take advantage of the commit to add an actual case that would raise a warning. Fixes: ``` nibabel/testing/__init__.py::nibabel.testing.clear_and_catch_warnings :1: DeprecationWarning: numpy.core is deprecated and has been renamed to numpy._core. 
The numpy._core namespace contains private NumPy internals and its use is discouraged, as NumPy internals can change without warning in any release. In practice, most real-world usage of numpy.core is to access functionality in the public NumPy API. If that is the case, use the public NumPy API. If not, you are using NumPy internals. If you would still like to access an internal attribute, use numpy._core.fromnumeric. ``` raised for example at: https://github.com/nipy/nibabel/actions/runs/9692730430/job/26746686623#step:7:195 --- nibabel/testing/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 0ba82d6cb0..992ef2ead4 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -150,9 +150,10 @@ class clear_and_catch_warnings(warnings.catch_warnings): Examples -------- >>> import warnings - >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]): + >>> with clear_and_catch_warnings(modules=[np.lib.scimath]): ... warnings.simplefilter('always') - ... # do something that raises a warning in np.core.fromnumeric + ... # do something that raises a warning in np.lib.scimath + ... _ = np.arccos(90) """ class_modules = () From 170b20c53a3c0c0bfae29ebd8c14638cfb9d192e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 5 Jul 2024 21:10:41 -0400 Subject: [PATCH 586/702] FIX: Use legacy numpy printing during doc builds/tests --- doc/source/conf.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index f4ab16d2db..4255ff1841 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -28,6 +28,10 @@ import tomli as tomllib # Check for external Sphinx extensions we depend on +try: + import numpy as np +except ImportError: + raise RuntimeError('Need to install "numpy" package for doc build') try: import numpydoc except ImportError: @@ -45,6 +49,11 @@ 'Need nibabel on Python PATH; consider "make htmldoc" from nibabel root directory' ) +from packaging.version import Version + +if Version(np.__version__) >= Version('1.22'): + np.set_printoptions(legacy='1.21') + # -- General configuration ---------------------------------------------------- # We load the nibabel release info into a dict by explicit execution From 65c3ca28a21b5aa15e0fac06e6b5a3faa0096857 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 7 Jul 2024 07:26:30 -0400 Subject: [PATCH 587/702] MNT: Update coverage config Remove ignored entry, add excludes for patterns that are unreachable or reasonable not to test. 
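As an illustrative sketch (not code from this repository), the new `exclude_also` patterns would drop lines like these from the coverage report:

```
import typing as ty

if ty.TYPE_CHECKING:  # excluded by the TYPE_CHECKING pattern; never true at runtime
    import numpy.typing as npt

class Loader:
    def __repr__(self):  # excluded by the `def __repr__` pattern
        return 'Loader()'

    def load(self):
        raise NotImplementedError  # excluded by the NotImplementedError pattern
```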
--- .coveragerc | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/.coveragerc b/.coveragerc index bcf28e09c2..8e218461f5 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,9 +1,19 @@ [run] branch = True source = nibabel -include = */nibabel/* omit = */externals/* */benchmarks/* */tests/* nibabel/_version.py + +[report] +exclude_also = + def __repr__ + if (ty\.|typing\.)?TYPE_CHECKING: + class .*\((ty\.|typing\.)Protocol\): + @(ty\.|typing\.)overload + if 0: + if __name__ == .__main__.: + @(abc\.)?abstractmethod + raise NotImplementedError From 2306616a1fb0bf1752b8cd3ad12b19156e64c295 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 8 Jul 2024 11:29:52 -0400 Subject: [PATCH 588/702] MNT: Remove "pragma: no cover" from lines ignored by config --- nibabel/_compression.py | 2 +- nibabel/arrayproxy.py | 12 +++++------- nibabel/dataobj_images.py | 2 +- nibabel/deprecated.py | 2 +- nibabel/deprecator.py | 2 +- nibabel/filebasedimages.py | 14 +++++++------- nibabel/filename_parser.py | 2 +- nibabel/loadsave.py | 2 +- nibabel/onetime.py | 6 ++---- nibabel/openers.py | 7 +++---- nibabel/pointset.py | 8 +++----- nibabel/spatialimages.py | 15 ++++++--------- nibabel/volumeutils.py | 6 +++--- nibabel/xmlutils.py | 8 ++++---- 14 files changed, 39 insertions(+), 49 deletions(-) diff --git a/nibabel/_compression.py b/nibabel/_compression.py index eeb66f36b4..f697fa54cc 100644 --- a/nibabel/_compression.py +++ b/nibabel/_compression.py @@ -17,7 +17,7 @@ from .optpkg import optional_package -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: import indexed_gzip # type: ignore[import] import pyzstd diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 4bf5bd4700..ed2310519e 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -57,7 +57,7 @@ KEEP_FILE_OPEN_DEFAULT = False -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: import numpy.typing as npt from typing_extensions import Self # PY310 @@ -75,19 +75,17 @@ class ArrayLike(ty.Protocol): shape: tuple[int, ...] @property - def ndim(self) -> int: ... # pragma: no cover + def ndim(self) -> int: ... # If no dtype is passed, any dtype might be returned, depending on the array-like @ty.overload - def __array__( - self, dtype: None = ..., / - ) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: ... # pragma: no cover + def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: ... # Any dtype might be passed, and *that* dtype must be returned @ty.overload - def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... # pragma: no cover + def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... - def __getitem__(self, key, /) -> npt.NDArray: ... # pragma: no cover + def __getitem__(self, key, /) -> npt.NDArray: ... 
class ArrayProxy(ArrayLike): diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index e84ac8567a..6850599014 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -19,7 +19,7 @@ from .filebasedimages import FileBasedHeader, FileBasedImage from .fileholders import FileMap -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: import numpy.typing as npt from .filename_parser import FileSpec diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index b8c378cee3..15d3e53265 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -8,7 +8,7 @@ from .deprecator import Deprecator from .pkg_info import cmp_pkg_version -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: P = ty.ParamSpec('P') diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 010b1be234..83118dd539 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -9,7 +9,7 @@ import warnings from textwrap import dedent -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: T = ty.TypeVar('T') P = ty.ParamSpec('P') diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 4e0d06b64c..c12644a2bd 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -20,7 +20,7 @@ from .filename_parser import TypesFilenamesError, _stringify_path, splitext_addext, types_filenames from .openers import ImageOpener -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: from .filename_parser import ExtensionSpec, FileSpec FileSniff = ty.Tuple[bytes, str] @@ -54,13 +54,13 @@ def from_header(klass: type[HdrT], header: FileBasedHeader | ty.Mapping | None = @classmethod def from_fileobj(klass: type[HdrT], fileobj: io.IOBase) -> HdrT: - raise NotImplementedError # pragma: no cover + raise NotImplementedError def write_to(self, fileobj: io.IOBase) -> None: - raise NotImplementedError # pragma: no cover + raise NotImplementedError def __eq__(self, other: object) -> bool: - raise NotImplementedError # pragma: no cover + raise NotImplementedError def __ne__(self, other: object) -> bool: return not self == other @@ -251,7 +251,7 @@ def from_filename(klass: type[ImgT], filename: FileSpec) -> ImgT: @classmethod def from_file_map(klass: type[ImgT], file_map: FileMap) -> ImgT: - raise NotImplementedError # pragma: no cover + raise NotImplementedError @classmethod def filespec_to_file_map(klass, filespec: FileSpec) -> FileMap: @@ -308,7 +308,7 @@ def to_filename(self, filename: FileSpec, **kwargs) -> None: self.to_file_map(**kwargs) def to_file_map(self, file_map: FileMap | None = None, **kwargs) -> None: - raise NotImplementedError # pragma: no cover + raise NotImplementedError @classmethod def make_file_map(klass, mapping: ty.Mapping[str, str | io.IOBase] | None = None) -> FileMap: @@ -373,7 +373,7 @@ def from_image(klass: type[ImgT], img: FileBasedImage) -> ImgT: img : ``FileBasedImage`` instance Image, of our own class """ - raise NotImplementedError # pragma: no cover + raise NotImplementedError @classmethod def _sniff_meta_for( diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index bdbca6a383..d2c23ae6e4 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -14,7 +14,7 @@ import pathlib import typing as ty -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: FileSpec = str | os.PathLike[str] ExtensionSpec = tuple[str, str | None] diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 159d9bae82..e39aeceba3 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -26,7 
+26,7 @@ _compressed_suffixes = ('.gz', '.bz2', '.zst') -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: from .filebasedimages import FileBasedImage from .filename_parser import FileSpec diff --git a/nibabel/onetime.py b/nibabel/onetime.py index fa1b2f9927..5018ba90c5 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -137,12 +137,10 @@ def __init__(self, func: ty.Callable[[InstanceT], T]) -> None: @ty.overload def __get__( self, obj: None, objtype: type[InstanceT] | None = None - ) -> ty.Callable[[InstanceT], T]: ... # pragma: no cover + ) -> ty.Callable[[InstanceT], T]: ... @ty.overload - def __get__( - self, obj: InstanceT, objtype: type[InstanceT] | None = None - ) -> T: ... # pragma: no cover + def __get__(self, obj: InstanceT, objtype: type[InstanceT] | None = None) -> T: ... def __get__( self, obj: InstanceT | None, objtype: type[InstanceT] | None = None diff --git a/nibabel/openers.py b/nibabel/openers.py index f84ccb7069..c3fa9a4783 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -18,7 +18,7 @@ from ._compression import HAVE_INDEXED_GZIP, IndexedGzipFile, pyzstd -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: from types import TracebackType from _typeshed import WriteableBuffer @@ -36,9 +36,8 @@ @ty.runtime_checkable class Fileish(ty.Protocol): - def read(self, size: int = -1, /) -> bytes: ... # pragma: no cover - - def write(self, b: bytes, /) -> int | None: ... # pragma: no cover + def read(self, size: int = -1, /) -> bytes: ... + def write(self, b: bytes, /) -> int | None: ... class DeterministicGzipFile(gzip.GzipFile): diff --git a/nibabel/pointset.py b/nibabel/pointset.py index e39a4d4187..70a802480d 100644 --- a/nibabel/pointset.py +++ b/nibabel/pointset.py @@ -30,7 +30,7 @@ from nibabel.fileslice import strided_scalar from nibabel.spatialimages import SpatialImage -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: from typing_extensions import Self _DType = ty.TypeVar('_DType', bound=np.dtype[ty.Any]) @@ -41,12 +41,10 @@ class CoordinateArray(ty.Protocol): shape: tuple[int, int] @ty.overload - def __array__( - self, dtype: None = ..., / - ) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: ... # pragma: no cover + def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: ... @ty.overload - def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... # pragma: no cover + def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... @dataclass diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 185694cd72..96f8115a22 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -154,7 +154,7 @@ except ImportError: # PY38 from functools import lru_cache as cache -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: import numpy.typing as npt SpatialImgT = ty.TypeVar('SpatialImgT', bound='SpatialImage') @@ -162,18 +162,15 @@ class HasDtype(ty.Protocol): - def get_data_dtype(self) -> np.dtype: ... # pragma: no cover - - def set_data_dtype(self, dtype: npt.DTypeLike) -> None: ... # pragma: no cover + def get_data_dtype(self) -> np.dtype: ... + def set_data_dtype(self, dtype: npt.DTypeLike) -> None: ... @ty.runtime_checkable class SpatialProtocol(ty.Protocol): - def get_data_dtype(self) -> np.dtype: ... # pragma: no cover - - def get_data_shape(self) -> ty.Tuple[int, ...]: ... # pragma: no cover - - def get_zooms(self) -> ty.Tuple[float, ...]: ... # pragma: no cover + def get_data_dtype(self) -> np.dtype: ... 
+ def get_data_shape(self) -> ty.Tuple[int, ...]: ... + def get_zooms(self) -> ty.Tuple[float, ...]: ... class HeaderDataError(Exception): diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 379d654a35..29b954dbb3 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -24,7 +24,7 @@ from .casting import OK_FLOATS, shared_range from .externals.oset import OrderedSet -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: import numpy.typing as npt Scalar = np.number | float @@ -1191,13 +1191,13 @@ def _ftype4scaled_finite( @ty.overload def finite_range( arr: npt.ArrayLike, check_nan: ty.Literal[False] = False -) -> tuple[Scalar, Scalar]: ... # pragma: no cover +) -> tuple[Scalar, Scalar]: ... @ty.overload def finite_range( arr: npt.ArrayLike, check_nan: ty.Literal[True] -) -> tuple[Scalar, Scalar, bool]: ... # pragma: no cover +) -> tuple[Scalar, Scalar, bool]: ... def finite_range( diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 5049a76412..5d079e1172 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -20,7 +20,7 @@ class XmlSerializable: def _to_xml_element(self) -> Element: """Output should be a xml.etree.ElementTree.Element""" - raise NotImplementedError # pragma: no cover + raise NotImplementedError def to_xml(self, enc='utf-8', **kwargs) -> bytes: r"""Generate an XML bytestring with a given encoding. @@ -109,10 +109,10 @@ def parse(self, string=None, fname=None, fptr=None): parser.ParseFile(fptr) def StartElementHandler(self, name, attrs): - raise NotImplementedError # pragma: no cover + raise NotImplementedError def EndElementHandler(self, name): - raise NotImplementedError # pragma: no cover + raise NotImplementedError def CharacterDataHandler(self, data): - raise NotImplementedError # pragma: no cover + raise NotImplementedError From 043c431ef46c5f6cd301a087bda2173a7972ab75 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 8 Jul 2024 11:40:54 -0400 Subject: [PATCH 589/702] MNT: Require coverage>=7.2 for exclude_also Remove outdated pytest version cap while we're here. 
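Note that `exclude_also` extends the default exclusion patterns rather than replacing them, and it was only added in coverage 7.2, so older versions may reject the option as unrecognized. A minimal guard (hypothetical, not part of this change):

```
import coverage
from packaging.version import Version

# coverage < 7.2 does not know the [report] exclude_also option
assert Version(coverage.__version__) >= Version('7.2')
```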
--- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4df5886d78..ff5168f9c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,11 +67,12 @@ doc = [ "tomli; python_version < '3.11'", ] test = [ - "pytest<8.1", # relax once pytest-doctestplus releases 1.2.0 + "pytest", "pytest-doctestplus", "pytest-cov", "pytest-httpserver", "pytest-xdist", + "coverage>=7.2", ] # Remaining: Simpler to centralize in tox dev = ["tox"] From ee1c9c43900dc42d511d08a4302d4486c9258250 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 8 Jul 2024 11:42:23 -0400 Subject: [PATCH 590/702] MNT: Stop excluding tests from coverage --- .coveragerc | 1 - 1 file changed, 1 deletion(-) diff --git a/.coveragerc b/.coveragerc index 8e218461f5..f65ab1441f 100644 --- a/.coveragerc +++ b/.coveragerc @@ -4,7 +4,6 @@ source = nibabel omit = */externals/* */benchmarks/* - */tests/* nibabel/_version.py [report] From 07db76b966020b26b636e5fd94b79b8b04b440ab Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 10 Jul 2024 21:16:03 -0400 Subject: [PATCH 591/702] CI: Add 3.13-nogil build --- .github/workflows/test.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2b3d9f2494..2b453e890a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -125,9 +125,9 @@ jobs: - os: ubuntu-latest python-version: 3.8 dependencies: 'min' - # NumPy 2.0 + # NoGIL - os: ubuntu-latest - python-version: '3.12' + python-version: '3.13-dev' dependencies: 'dev' exclude: # x86 for Windows + Python<3.12 @@ -168,11 +168,18 @@ jobs: submodules: recursive fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} + if: "!endsWith(matrix.python-version, '-dev')" uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} allow-prereleases: true + - name: Set up Python ${{ matrix.python-version }} + if: endsWith(matrix.python-version, '-dev') + uses: deadsnakes/action@v3.1.0 + with: + python-version: ${{ matrix.python-version }} + nogil: true - name: Display Python version run: python -c "import sys; print(sys.version)" - name: Install tox From 6efd41a7279de2488aa857518e3ab30e8a8ff6d4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 10 Jul 2024 21:17:24 -0400 Subject: [PATCH 592/702] TOX: Add a Python 3.13 environment --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index b9ac9557cb..02de7a7e08 100644 --- a/tox.ini +++ b/tox.ini @@ -16,7 +16,7 @@ envlist = # x64-only range py312-{full,pre}-x64 # Special environment for numpy 2.0-dev testing - py312-dev-x64 + py313-dev-x64 install doctest style @@ -31,6 +31,7 @@ python = 3.10: py310 3.11: py311 3.12: py312 + 3.13: py313 [gh-actions:env] DEPENDS = From cb73d1c6dfcd0e8ca93011125cf507c85987f1ad Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 10 Jul 2024 21:26:42 -0400 Subject: [PATCH 593/702] TOX: Drop h5py and indexed_gzip dependencies for dev Allow pillow and matplotlib to be built from sdist in dev environments. 
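Dropping these from the dev environment is safe because nibabel treats them as optional; for example, without indexed_gzip it falls back to the standard library's gzip. A quick check (a sketch; the flag comes from nibabel/openers.py, as seen in an earlier patch):

```
from nibabel.openers import HAVE_INDEXED_GZIP

# Expected to be False in a dev env without indexed_gzip wheels;
# gzip-compressed images still load through the stdlib fallback.
print(HAVE_INDEXED_GZIP)
```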
--- tox.ini | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index 02de7a7e08..5b4dcc0174 100644 --- a/tox.ini +++ b/tox.ini @@ -51,7 +51,8 @@ description = Pytest with coverage labels = test install_command = python -I -m pip install -v \ - --only-binary numpy,scipy,h5py,pillow,matplotlib \ + dev: --only-binary numpy,scipy,h5py \ + !dev: --only-binary numpy,scipy,h5py,pillow,matplotlib \ pre,dev: --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ {opts} {packages} pip_pre = @@ -90,15 +91,15 @@ deps = # Numpy 2.0 is a major breaking release; we cannot put much effort into # supporting until it's at least RC stable pre: numpy <2.0.dev0 - dev: numpy >=2.0.dev0 + dev: numpy >=2.1.dev0 # Scipy stopped producing win32 wheels at py310 py3{8,9}-full-x86,x64,arm64: scipy >=1.6 # Matplotlib depends on scipy, so cannot be built for py310 on x86 py3{8,9}-full-x86,x64,arm64: matplotlib >=3.4 # h5py stopped producing win32 wheels at py39 - py38-full-x86,x64,arm64: h5py >=2.10 + py38-full-x86,{full,pre}-{x64,arm64}: h5py >=2.10 full,pre,dev: pillow >=8.1 - full,pre,dev: indexed_gzip >=1.4 + full,pre: indexed_gzip >=1.4 full,pre,dev: pyzstd >=0.14.3 full,pre: pydicom >=2.1 dev: pydicom @ git+https://github.com/pydicom/pydicom.git@main From a14ead51ccb8ff3da9603e5ca0002857de18ae6d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 10 Jul 2024 22:01:49 -0400 Subject: [PATCH 594/702] CI: Run tox in debug to see what files are downloaded --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2b453e890a..05718dc1ff 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -189,7 +189,7 @@ jobs: - name: Show tox config run: tox c - name: Run tox - run: tox -v --exit-and-dump-after 1200 + run: tox -vv --exit-and-dump-after 1200 - uses: codecov/codecov-action@v4 if: ${{ always() }} with: From 880e13e3dcd30b077762e1c8b46ce76496bd28b8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 10 Jul 2024 22:08:41 -0400 Subject: [PATCH 595/702] TOX: Add PYTHON_GIL=0 to py313 environments --- tox.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tox.ini b/tox.ini index 5b4dcc0174..5df35c8d38 100644 --- a/tox.ini +++ b/tox.ini @@ -71,6 +71,8 @@ pass_env = NO_COLOR CLICOLOR CLICOLOR_FORCE +set_env = + py313: PYTHON_GIL=0 extras = test deps = # General minimum dependencies: pin based on API usage From e0e50df3e8fb7a48fba207098ec446abf9d2efed Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 25 Jul 2024 11:34:36 -0400 Subject: [PATCH 596/702] RF: Replace OneTimeProperty/auto_attr with cached_property --- nibabel/onetime.py | 114 +++++++--------------------------- nibabel/tests/test_onetime.py | 40 ++++++++---- 2 files changed, 51 insertions(+), 103 deletions(-) diff --git a/nibabel/onetime.py b/nibabel/onetime.py index 5018ba90c5..f6d3633af3 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -1,9 +1,12 @@ """Descriptor support for NIPY -Utilities to support special Python descriptors [1,2], in particular the use of -a useful pattern for properties we call 'one time properties'. These are -object attributes which are declared as properties, but become regular -attributes once they've been read the first time. 
They can thus be evaluated +Utilities to support special Python descriptors [1,2], in particular +:func:`~functools.cached_property`, which has been available in the Python +standard library since Python 3.8. We currently maintain aliases from +earlier names for this descriptor, specifically `OneTimeProperty` and `auto_attr`. + +:func:`~functools.cached_property` creates properties that are computed once +and then stored as regular attributes. They can thus be evaluated later in the object's life cycle, but once evaluated they become normal, static attributes with no function call overhead on access or any other constraints. @@ -21,10 +24,7 @@ from __future__ import annotations -import typing as ty - -InstanceT = ty.TypeVar('InstanceT') -T = ty.TypeVar('T') +from functools import cached_property from nibabel.deprecated import deprecate_with_version @@ -34,22 +34,22 @@ class ResetMixin: - """A Mixin class to add a .reset() method to users of OneTimeProperty. + """A Mixin class to add a .reset() method to users of cached_property. - By default, auto attributes once computed, become static. If they happen + By default, cached properties, once computed, become static. If they happen to depend on other parts of an object and those parts change, their values may now be invalid. This class offers a .reset() method that users can call *explicitly* when they know the state of their objects may have changed and they want to ensure that *all* their special attributes should be invalidated. Once - reset() is called, all their auto attributes are reset to their - OneTimeProperty descriptors, and their accessor functions will be triggered - again. + reset() is called, all their cached properties are reset to their + :func:`~functools.cached_property` descriptors, + and their accessor functions will be triggered again. .. warning:: - If a class has a set of attributes that are OneTimeProperty, but that + If a class has a set of attributes that are cached_property, but that can be initialized from any one of them, do NOT use this mixin! For instance, UniformTimeSeries can be initialized with only sampling_rate and t0, sampling_interval and time are auto-computed. But if you were @@ -68,15 +68,15 @@ class ResetMixin: ... def __init__(self,x=1.0): ... self.x = x ... - ... @auto_attr + ... @cached_property ... def y(self): ... print('*** y computation executed ***') ... return self.x / 2.0 - ... >>> a = A(10) About to access y twice, the second time no computation is done: + >>> a.y *** y computation executed *** 5.0 @@ -84,17 +84,21 @@ class ResetMixin: 5.0 Changing x + >>> a.x = 20 a.y doesn't change to 10, since it is a static attribute: + >>> a.y 5.0 We now reset a, and this will then force all auto attributes to recompute the next time we access them: + >>> a.reset() About to access y twice again after reset(): + >>> a.y *** y computation executed *** 10.0 @@ -103,88 +107,18 @@ class ResetMixin: """ def reset(self) -> None: - """Reset all OneTimeProperty attributes that may have fired already.""" + """Reset all cached_property attributes that may have fired already.""" # To reset them, we simply remove them from the instance dict. At that # point, it's as if they had never been computed. On the next access, # the accessor function from the parent class will be called, simply # because that's how the python descriptor protocol works. 
for mname, mval in self.__class__.__dict__.items(): - if mname in self.__dict__ and isinstance(mval, OneTimeProperty): + if mname in self.__dict__ and isinstance(mval, cached_property): delattr(self, mname) -class OneTimeProperty(ty.Generic[T]): - """A descriptor to make special properties that become normal attributes. - - This is meant to be used mostly by the auto_attr decorator in this module. - """ - - def __init__(self, func: ty.Callable[[InstanceT], T]) -> None: - """Create a OneTimeProperty instance. - - Parameters - ---------- - func : method - - The method that will be called the first time to compute a value. - Afterwards, the method's name will be a standard attribute holding - the value of this computation. - """ - self.getter = func - self.name = func.__name__ - self.__doc__ = func.__doc__ - - @ty.overload - def __get__( - self, obj: None, objtype: type[InstanceT] | None = None - ) -> ty.Callable[[InstanceT], T]: ... - - @ty.overload - def __get__(self, obj: InstanceT, objtype: type[InstanceT] | None = None) -> T: ... - - def __get__( - self, obj: InstanceT | None, objtype: type[InstanceT] | None = None - ) -> T | ty.Callable[[InstanceT], T]: - """This will be called on attribute access on the class or instance.""" - if obj is None: - # Being called on the class, return the original function. This - # way, introspection works on the class. - return self.getter - - # Errors in the following line are errors in setting a OneTimeProperty - val = self.getter(obj) - - obj.__dict__[self.name] = val - return val - - -def auto_attr(func: ty.Callable[[InstanceT], T]) -> OneTimeProperty[T]: - """Decorator to create OneTimeProperty attributes. - - Parameters - ---------- - func : method - The method that will be called the first time to compute a value. - Afterwards, the method's name will be a standard attribute holding the - value of this computation. - - Examples - -------- - >>> class MagicProp: - ... @auto_attr - ... def a(self): - ... return 99 - ... - >>> x = MagicProp() - >>> 'a' in x.__dict__ - False - >>> x.a - 99 - >>> 'a' in x.__dict__ - True - """ - return OneTimeProperty(func) - +OneTimeProperty = cached_property +auto_attr = cached_property # ----------------------------------------------------------------------------- # Deprecated API diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index 4d72949271..d6b4579534 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -1,7 +1,22 @@ -from nibabel.onetime import auto_attr, setattr_on_read +from functools import cached_property + +from nibabel.onetime import ResetMixin, setattr_on_read from nibabel.testing import deprecated_to, expires +class A(ResetMixin): + @cached_property + def y(self): + return self.x / 2.0 + + @cached_property + def z(self): + return self.x / 3.0 + + def __init__(self, x=1.0): + self.x = x + + @expires('5.0.0') def test_setattr_on_read(): with deprecated_to('5.0.0'): @@ -19,15 +34,14 @@ def a(self): assert x.a is obj -def test_auto_attr(): - class MagicProp: - @auto_attr - def a(self): - return object() - - x = MagicProp() - assert 'a' not in x.__dict__ - obj = x.a - assert 'a' in x.__dict__ - # Each call to object() produces a unique object. Verify we get the same one every time. 
- assert x.a is obj +def test_ResetMixin(): + a = A(10) + assert 'y' not in a.__dict__ + assert a.y == 5 + assert 'y' in a.__dict__ + a.x = 20 + assert a.y == 5 + # Call reset and no error should be raised even though z was never accessed + a.reset() + assert 'y' not in a.__dict__ + assert a.y == 10 From c7c98f7dae9733e38892b70bfcd190610e21c5d0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 25 Jul 2024 11:42:00 -0400 Subject: [PATCH 597/702] DOC: Use packaging.version.Version over LooseVersion --- doc/tools/build_modref_templates.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py index 0e82cf6bf8..76cf9cdf39 100755 --- a/doc/tools/build_modref_templates.py +++ b/doc/tools/build_modref_templates.py @@ -9,7 +9,7 @@ import sys # version comparison -from distutils.version import LooseVersion as V +from packaging.version import Version as V from os.path import join as pjoin # local imports @@ -73,6 +73,8 @@ def abort(error): if re.match('^_version_(major|minor|micro|extra)', v) ] ) + + source_version = V(source_version) print('***', source_version) if source_version != installed_version: From b6eccc250cc56ddc1cb8a81b240f0bc0e3325436 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 25 Jul 2024 12:04:29 -0400 Subject: [PATCH 598/702] RF: nibabel.onetime.auto_attr -> functools.cached_property --- nibabel/nicom/dicomwrappers.py | 46 +++++++++++++++++----------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index a5ea550d87..2270ed3f05 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -14,12 +14,12 @@ import operator import warnings +from functools import cached_property import numpy as np from nibabel.optpkg import optional_package -from ..onetime import auto_attr as one_time from ..openers import ImageOpener from . import csareader as csar from .dwiparams import B2q, nearest_pos_semi_def, q2bg @@ -140,7 +140,7 @@ def __init__(self, dcm_data): """ self.dcm_data = dcm_data - @one_time + @cached_property def image_shape(self): """The array shape as it will be returned by ``get_data()``""" shape = (self.get('Rows'), self.get('Columns')) @@ -148,7 +148,7 @@ def image_shape(self): return None return shape - @one_time + @cached_property def image_orient_patient(self): """Note that this is _not_ LR flipped""" iop = self.get('ImageOrientationPatient') @@ -158,7 +158,7 @@ def image_orient_patient(self): iop = np.array(list(map(float, iop))) return np.array(iop).reshape(2, 3).T - @one_time + @cached_property def slice_normal(self): iop = self.image_orient_patient if iop is None: @@ -166,7 +166,7 @@ def slice_normal(self): # iop[:, 0] is column index cosine, iop[:, 1] is row index cosine return np.cross(iop[:, 1], iop[:, 0]) - @one_time + @cached_property def rotation_matrix(self): """Return rotation matrix between array indices and mm @@ -193,7 +193,7 @@ def rotation_matrix(self): raise WrapperPrecisionError('Rotation matrix not nearly orthogonal') return R - @one_time + @cached_property def voxel_sizes(self): """voxel sizes for array as returned by ``get_data()``""" # pix space gives (row_spacing, column_spacing). 
That is, the @@ -212,7 +212,7 @@ def voxel_sizes(self): pix_space = list(map(float, pix_space)) return tuple(pix_space + [zs]) - @one_time + @cached_property def image_position(self): """Return position of first voxel in data block @@ -231,7 +231,7 @@ def image_position(self): # Values are python Decimals in pydicom 0.9.7 return np.array(list(map(float, ipp))) - @one_time + @cached_property def slice_indicator(self): """A number that is higher for higher slices in Z @@ -246,12 +246,12 @@ def slice_indicator(self): return None return np.inner(ipp, s_norm) - @one_time + @cached_property def instance_number(self): """Just because we use this a lot for sorting""" return self.get('InstanceNumber') - @one_time + @cached_property def series_signature(self): """Signature for matching slices into series @@ -390,7 +390,7 @@ def _apply_scale_offset(self, data, scale, offset): return data + offset return data - @one_time + @cached_property def b_value(self): """Return b value for diffusion or None if not available""" q_vec = self.q_vector @@ -398,7 +398,7 @@ def b_value(self): return None return q2bg(q_vec)[0] - @one_time + @cached_property def b_vector(self): """Return b vector for diffusion or None if not available""" q_vec = self.q_vector @@ -469,7 +469,7 @@ def __init__(self, dcm_data): raise WrapperError('SharedFunctionalGroupsSequence is empty.') self._shape = None - @one_time + @cached_property def image_shape(self): """The array shape as it will be returned by ``get_data()`` @@ -573,7 +573,7 @@ def image_shape(self): ) return tuple(shape) - @one_time + @cached_property def image_orient_patient(self): """ Note that this is _not_ LR flipped @@ -590,7 +590,7 @@ def image_orient_patient(self): iop = np.array(list(map(float, iop))) return np.array(iop).reshape(2, 3).T - @one_time + @cached_property def voxel_sizes(self): """Get i, j, k voxel sizes""" try: @@ -610,7 +610,7 @@ def voxel_sizes(self): # Ensure values are float rather than Decimal return tuple(map(float, list(pix_space) + [zs])) - @one_time + @cached_property def image_position(self): try: ipp = self.shared.PlanePositionSequence[0].ImagePositionPatient @@ -623,7 +623,7 @@ def image_position(self): return None return np.array(list(map(float, ipp))) - @one_time + @cached_property def series_signature(self): signature = {} eq = operator.eq @@ -696,7 +696,7 @@ def __init__(self, dcm_data, csa_header=None): csa_header = {} self.csa_header = csa_header - @one_time + @cached_property def slice_normal(self): # The std_slice_normal comes from the cross product of the directions # in the ImageOrientationPatient @@ -720,7 +720,7 @@ def slice_normal(self): else: return std_slice_normal - @one_time + @cached_property def series_signature(self): """Add ICE dims from CSA header to signature""" signature = super().series_signature @@ -730,7 +730,7 @@ def series_signature(self): signature['ICE_Dims'] = (ice, operator.eq) return signature - @one_time + @cached_property def b_matrix(self): """Get DWI B matrix referring to voxel space @@ -767,7 +767,7 @@ def b_matrix(self): # semi-definite. 
return nearest_pos_semi_def(B_vox) - @one_time + @cached_property def q_vector(self): """Get DWI q vector referring to voxel space @@ -840,7 +840,7 @@ def __init__(self, dcm_data, csa_header=None, n_mosaic=None): self.n_mosaic = n_mosaic self.mosaic_size = int(np.ceil(np.sqrt(n_mosaic))) - @one_time + @cached_property def image_shape(self): """Return image shape as returned by ``get_data()``""" # reshape pixel slice array back from mosaic @@ -850,7 +850,7 @@ def image_shape(self): return None return (rows // self.mosaic_size, cols // self.mosaic_size, self.n_mosaic) - @one_time + @cached_property def image_position(self): """Return position of first voxel in data block From c49dff290f6113327eaa62bbd8aff4da924dd54a Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Tue, 18 Jun 2024 16:44:03 -0700 Subject: [PATCH 599/702] BF: Fix for 'split' (concatenated?) multiframe DICOM Can't just use number of frame indices to determine shape of data, as the actual frames could still be split into different files. Also can't assume a multiframe file is more than a single slice. --- nibabel/nicom/dicomwrappers.py | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 2270ed3f05..894a0ed219 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -554,23 +554,20 @@ def image_shape(self): raise WrapperError('Missing information, cannot remove indices with confidence.') derived_dim_idx = dim_seq.index(derived_tag) frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) - # account for the 2 additional dimensions (row and column) not included - # in the indices - n_dim = frame_indices.shape[1] + 2 # Store frame indices self._frame_indices = frame_indices - if n_dim < 4: # 3D volume - return rows, cols, n_frames - # More than 3 dimensions + # Determine size of any extra-spatial dimensions ns_unique = [len(np.unique(row)) for row in self._frame_indices.T] - shape = (rows, cols) + tuple(ns_unique) - n_vols = np.prod(shape[3:]) - n_frames_calc = n_vols * shape[2] - if n_frames != n_frames_calc: - raise WrapperError( - f'Calculated # of frames ({n_frames_calc}={n_vols}*{shape[2]}) ' - f'of shape {shape} does not match NumberOfFrames {n_frames}.' - ) + shape = (rows, cols) + tuple(x for i, x in enumerate(ns_unique) if i == 0 or x != 1) + n_dim = len(shape) + if n_dim > 3: + n_vols = np.prod(shape[3:]) + n_frames_calc = n_vols * shape[2] + if n_frames != n_frames_calc: + raise WrapperError( + f'Calculated # of frames ({n_frames_calc}={n_vols}*{shape[2]}) ' + f'of shape {shape} does not match NumberOfFrames {n_frames}.' 
+ ) return tuple(shape) @cached_property @@ -640,10 +637,11 @@ def get_data(self): raise WrapperError('No valid information for image shape') data = self.get_pixel_array() # Roll frames axis to last - data = data.transpose((1, 2, 0)) - # Sort frames with first index changing fastest, last slowest - sorted_indices = np.lexsort(self._frame_indices.T) - data = data[..., sorted_indices] + if len(data.shape) > 2: + data = data.transpose((1, 2, 0)) + # Sort frames with first index changing fastest, last slowest + sorted_indices = np.lexsort(self._frame_indices.T) + data = data[..., sorted_indices] data = data.reshape(shape, order='F') return self._scale_data(data) From 4063114c2bde09f34d88c1193a5fd20adc8c1932 Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Wed, 24 Jul 2024 15:29:26 -0700 Subject: [PATCH 600/702] BF+TST: Test and fix a bunch of multiframe fixes Corrects issue where order of slice indices was assumed to match the order needed to move along the direction of the slice normal, which resulted in slice orientation flips. Ignores indices that don't evenly divide data, and at the end will try to combine those indices (if needed) into a single tuple index. --- nibabel/nicom/dicomwrappers.py | 124 +++++++++++++++----- nibabel/nicom/tests/test_dicomwrappers.py | 132 ++++++++++++++++++---- 2 files changed, 203 insertions(+), 53 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 894a0ed219..c3f484a003 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -467,6 +467,25 @@ def __init__(self, dcm_data): self.shared = dcm_data.get('SharedFunctionalGroupsSequence')[0] except TypeError: raise WrapperError('SharedFunctionalGroupsSequence is empty.') + # Try to determine slice order and minimal image position patient + self._frame_slc_ord = self._ipp = None + try: + frame_ipps = [self.shared.PlanePositionSequence[0].ImagePositionPatient] + except AttributeError: + try: + frame_ipps = [f.PlanePositionSequence[0].ImagePositionPatient for f in self.frames] + except AttributeError: + frame_ipps = None + if frame_ipps is not None and all(ipp is not None for ipp in frame_ipps): + frame_ipps = [np.array(list(map(float, ipp))) for ipp in frame_ipps] + frame_slc_pos = [np.inner(ipp, self.slice_normal) for ipp in frame_ipps] + rnd_slc_pos = np.round(frame_slc_pos, 4) + uniq_slc_pos = np.unique(rnd_slc_pos) + pos_ord_map = { + val: order for val, order in zip(uniq_slc_pos, np.argsort(uniq_slc_pos)) + } + self._frame_slc_ord = [pos_ord_map[pos] for pos in rnd_slc_pos] + self._ipp = frame_ipps[np.argmin(frame_slc_pos)] self._shape = None @cached_property @@ -509,14 +528,16 @@ def image_shape(self): if hasattr(first_frame, 'get') and first_frame.get([0x18, 0x9117]): # DWI image may include derived isotropic, ADC or trace volume try: - anisotropic = pydicom.Sequence( - frame - for frame in self.frames - if frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC' - ) + aniso_frames = pydicom.Sequence() + aniso_slc_ord = [] + for slc_ord, frame in zip(self._frame_slc_ord, self.frames): + if frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC': + aniso_frames.append(frame) + aniso_slc_ord.append(slc_ord) # Image contains DWI volumes followed by derived images; remove derived images - if len(anisotropic) != 0: - self.frames = anisotropic + if len(aniso_frames) != 0: + self.frames = aniso_frames + self._frame_slc_ord = aniso_slc_ord except IndexError: # Sequence tag is found but missing items! 
raise WrapperError('Diffusion file missing information') @@ -554,20 +575,70 @@ def image_shape(self): raise WrapperError('Missing information, cannot remove indices with confidence.') derived_dim_idx = dim_seq.index(derived_tag) frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) + # Determine the shape and which indices to use + shape = [rows, cols] + curr_parts = n_frames + frames_per_part = 1 + del_indices = {} + for row_idx, row in enumerate(frame_indices.T): + if curr_parts == 1: + break + unique = np.unique(row) + count = len(unique) + if count == 1: + continue + # Replace slice indices with order determined from slice positions along normal + if len(shape) == 2: + row = self._frame_slc_ord + frame_indices.T[row_idx, :] = row + unique = np.unique(row) + if len(unique) != count: + raise WrapperError("Number of slice indices and positions don't match") + new_parts, leftover = divmod(curr_parts, count) + allowed_val_counts = [new_parts * frames_per_part] + if len(shape) > 2: + # Except for the slice dim, having a unique value for each frame is valid + allowed_val_counts.append(n_frames) + if leftover != 0 or any( + np.count_nonzero(row == val) not in allowed_val_counts for val in unique + ): + if len(shape) == 2: + raise WrapperError('Missing slices from multiframe') + del_indices[row_idx] = count + continue + frames_per_part *= count + shape.append(count) + curr_parts = new_parts + if del_indices: + if curr_parts > 1: + ns_failed = [k for k, v in del_indices.items() if v != 1] + if len(ns_failed) > 1: + # If some indices weren't used yet but we still have unaccounted for + # partitions, try combining indices into single tuple and using that + tup_dtype = np.dtype(','.join(['I'] * len(ns_failed))) + row = [tuple(x for x in vals) for vals in frame_indices[:, ns_failed]] + row = np.array(row, dtype=tup_dtype) + frame_indices = np.delete(frame_indices, np.array(list(del_indices.keys())), axis=1) + if curr_parts > 1 and len(ns_failed) > 1: + unique = np.unique(row, axis=0) + count = len(unique) + new_parts, rem = divmod(curr_parts, count) + allowed_val_counts = [new_parts * frames_per_part, n_frames] + if rem == 0 and all( + np.count_nonzero(row == val) in allowed_val_counts for val in unique + ): + shape.append(count) + curr_parts = new_parts + ord_vals = np.argsort(unique) + order = {tuple(unique[i]): ord_vals[i] for i in range(count)} + ord_row = np.array([order[tuple(v)] for v in row]) + frame_indices = np.hstack( + [frame_indices, np.array(ord_row).reshape((n_frames, 1))] + ) + if curr_parts > 1: + raise WrapperError('Unable to determine sorting of final dimension(s)') # Store frame indices self._frame_indices = frame_indices - # Determine size of any extra-spatial dimensions - ns_unique = [len(np.unique(row)) for row in self._frame_indices.T] - shape = (rows, cols) + tuple(x for i, x in enumerate(ns_unique) if i == 0 or x != 1) - n_dim = len(shape) - if n_dim > 3: - n_vols = np.prod(shape[3:]) - n_frames_calc = n_vols * shape[2] - if n_frames != n_frames_calc: - raise WrapperError( - f'Calculated # of frames ({n_frames_calc}={n_vols}*{shape[2]}) ' - f'of shape {shape} does not match NumberOfFrames {n_frames}.' 
- ) return tuple(shape) @cached_property @@ -607,18 +678,11 @@ def voxel_sizes(self): # Ensure values are float rather than Decimal return tuple(map(float, list(pix_space) + [zs])) - @cached_property + @property def image_position(self): - try: - ipp = self.shared.PlanePositionSequence[0].ImagePositionPatient - except AttributeError: - try: - ipp = self.frames[0].PlanePositionSequence[0].ImagePositionPatient - except AttributeError: - raise WrapperError('Cannot get image position from dicom') - if ipp is None: - return None - return np.array(list(map(float, ipp))) + if self._ipp is None: + raise WrapperError('Not enough information for image_position_patient') + return self._ipp @cached_property def series_signature(self): diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index d14c35dcdb..25a58d70e5 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -364,7 +364,7 @@ def test_decimal_rescale(): assert dw.get_data().dtype != np.dtype(object) -def fake_frames(seq_name, field_name, value_seq): +def fake_frames(seq_name, field_name, value_seq, frame_seq=None): """Make fake frames for multiframe testing Parameters @@ -375,6 +375,8 @@ def fake_frames(seq_name, field_name, value_seq): name of field within sequence value_seq : length N sequence sequence of values + frame_seq : length N list + previous result from this function to update Returns ------- @@ -386,19 +388,28 @@ def fake_frames(seq_name, field_name, value_seq): class Fake: pass - frames = [] - for value in value_seq: - fake_frame = Fake() + if frame_seq == None: + frame_seq = [Fake() for _ in range(len(value_seq))] + for value, fake_frame in zip(value_seq, frame_seq): fake_element = Fake() setattr(fake_element, field_name, value) setattr(fake_frame, seq_name, [fake_element]) - frames.append(fake_frame) - return frames + return frame_seq -def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None): +def fake_shape_dependents( + div_seq, + sid_seq=None, + sid_dim=None, + ipp_seq=None, + slice_dim=None, + flip_ipp_idx_corr=False, +): """Make a fake dictionary of data that ``image_shape`` is dependent on. + If you are providing the ``ipp_seq`` argument, they should be generated using + a slice normal aligned with the z-axis (i.e. iop == (0, 1, 0, 1, 0, 0)). + Parameters ---------- div_seq : list of tuples @@ -407,39 +418,86 @@ def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None): list of values to use for the `StackID` of each frame. 
sid_dim : int the index of the column in 'div_seq' to use as 'sid_seq' + ipp_seq : list of tuples + list of values to use for `ImagePositionPatient` for each frame + slice_dim : int + the index of the column in 'div_seq' corresponding to slices + flip_ipp_idx_corr : bool + generate ipp values so slice location is negatively correlated with slice index """ - class DimIdxSeqElem: + class PrintBase: + def __repr__(self): + attr_strs = [] + for attr in dir(self): + if attr[0].isupper(): + attr_strs.append(f'{attr}={getattr(self, attr)}') + return f"{self.__class__.__name__}({', '.join(attr_strs)})" + + class DimIdxSeqElem(PrintBase): def __init__(self, dip=(0, 0), fgp=None): self.DimensionIndexPointer = dip if fgp is not None: self.FunctionalGroupPointer = fgp - class FrmContSeqElem: + class FrmContSeqElem(PrintBase): def __init__(self, div, sid): self.DimensionIndexValues = div self.StackID = sid - class PerFrmFuncGrpSeqElem: - def __init__(self, div, sid): + class PlnPosSeqElem(PrintBase): + def __init__(self, ipp): + self.ImagePositionPatient = ipp + + class PlnOrientSeqElem(PrintBase): + def __init__(self, iop): + self.ImageOrientationPatient = iop + + class PerFrmFuncGrpSeqElem(PrintBase): + def __init__(self, div, sid, ipp, iop): self.FrameContentSequence = [FrmContSeqElem(div, sid)] + self.PlanePositionSequence = [PlnPosSeqElem(ipp)] + self.PlaneOrientationSequence = [PlnOrientSeqElem(iop)] # if no StackID values passed in then use the values at index 'sid_dim' in # the value for DimensionIndexValues for it + n_indices = len(div_seq[0]) if sid_seq is None: if sid_dim is None: sid_dim = 0 sid_seq = [div[sid_dim] for div in div_seq] - # create the DimensionIndexSequence + # Determine slice_dim and create per-slice ipp information + if slice_dim is None: + slice_dim = 1 if sid_dim == 0 else 0 num_of_frames = len(div_seq) - dim_idx_seq = [DimIdxSeqElem()] * num_of_frames + frame_slc_indices = np.array(div_seq)[:, slice_dim] + uniq_slc_indices = np.unique(frame_slc_indices) + n_slices = len(uniq_slc_indices) + assert num_of_frames % n_slices == 0 + iop_seq = [(0.0, 1.0, 0.0, 1.0, 0.0, 0.0) for _ in range(num_of_frames)] + if ipp_seq is None: + slc_locs = np.linspace(-1.0, 1.0, n_slices) + if flip_ipp_idx_corr: + slc_locs = slc_locs[::-1] + slc_idx_loc = { + div_idx: slc_locs[arr_idx] for arr_idx, div_idx in enumerate(np.sort(uniq_slc_indices)) + } + ipp_seq = [(-1.0, -1.0, slc_idx_loc[idx]) for idx in frame_slc_indices] + else: + assert flip_ipp_idx_corr is False # caller can flip it themselves + assert len(ipp_seq) == num_of_frames + # create the DimensionIndexSequence + dim_idx_seq = [DimIdxSeqElem()] * n_indices # add an entry for StackID into the DimensionIndexSequence if sid_dim is not None: sid_tag = pydicom.datadict.tag_for_keyword('StackID') fcs_tag = pydicom.datadict.tag_for_keyword('FrameContentSequence') dim_idx_seq[sid_dim] = DimIdxSeqElem(sid_tag, fcs_tag) # create the PerFrameFunctionalGroupsSequence - frames = [PerFrmFuncGrpSeqElem(div, sid) for div, sid in zip(div_seq, sid_seq)] + frames = [ + PerFrmFuncGrpSeqElem(div, sid, ipp, iop) + for div, sid, ipp, iop in zip(div_seq, sid_seq, ipp_seq, iop_seq) + ] return { 'NumberOfFrames': num_of_frames, 'DimensionIndexSequence': dim_idx_seq, @@ -480,7 +538,15 @@ def test_shape(self): # PerFrameFunctionalGroupsSequence does not match NumberOfFrames with pytest.raises(AssertionError): dw.image_shape - # check 3D shape when StackID index is 0 + # check 2D shape with StackID index is 0 + div_seq = ((1, 1),) + 
fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + assert MFW(fake_mf).image_shape == (32, 64) + # Check 2D shape with extraneous extra indices + div_seq = ((1, 1, 2),) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + assert MFW(fake_mf).image_shape == (32, 64) + # Check 3D shape when StackID index is 0 div_seq = ((1, 1), (1, 2), (1, 3), (1, 4)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) assert MFW(fake_mf).image_shape == (32, 64, 4) @@ -541,6 +607,18 @@ def test_shape(self): div_seq = ((1, 1, 1), (2, 1, 1), (1, 1, 2), (2, 1, 2), (1, 1, 3), (2, 1, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) assert MFW(fake_mf).image_shape == (32, 64, 2, 3) + # Test with combo indices, here with the last two needing to be combined into + # a single index corresponding to [(1, 1), (1, 1), (2, 1), (2, 1), (2, 2), (2, 2)] + div_seq = ( + (1, 1, 1, 1), + (1, 2, 1, 1), + (1, 1, 2, 1), + (1, 2, 2, 1), + (1, 1, 2, 2), + (1, 2, 2, 2), + ) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 3) def test_iop(self): # Test Image orient patient for multiframe @@ -608,22 +686,30 @@ def test_image_position(self): with pytest.raises(didw.WrapperError): dw.image_position # Make a fake frame - fake_frame = fake_frames( - 'PlanePositionSequence', 'ImagePositionPatient', [[-2.0, 3.0, 7]] - )[0] - fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] + iop = [0, 1, 0, 1, 0, 0] + frames = fake_frames('PlaneOrientationSequence', 'ImageOrientationPatient', [iop]) + frames = fake_frames( + 'PlanePositionSequence', 'ImagePositionPatient', [[-2.0, 3.0, 7]], frames + ) + fake_mf['SharedFunctionalGroupsSequence'] = frames assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) fake_mf['SharedFunctionalGroupsSequence'] = [None] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_position - fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] + fake_mf['PerFrameFunctionalGroupsSequence'] = frames assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) # Check lists of Decimals work - fake_frame.PlanePositionSequence[0].ImagePositionPatient = [ + frames[0].PlanePositionSequence[0].ImagePositionPatient = [ Decimal(str(v)) for v in [-2, 3, 7] ] assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) assert MFW(fake_mf).image_position.dtype == float + # We should get minimum along slice normal with multiple frames + frames = fake_frames('PlaneOrientationSequence', 'ImageOrientationPatient', [iop] * 2) + ipps = [[-2.0, 3.0, 7], [-2.0, 3.0, 6]] + frames = fake_frames('PlanePositionSequence', 'ImagePositionPatient', ipps, frames) + fake_mf['PerFrameFunctionalGroupsSequence'] = frames + assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 6]) @dicom_test @pytest.mark.xfail(reason='Not packaged in install', raises=FileNotFoundError) @@ -644,7 +730,7 @@ def test_data_real(self): if endian_codes[data.dtype.byteorder] == '>': data = data.byteswap() dat_str = data.tobytes() - assert sha1(dat_str).hexdigest() == '149323269b0af92baa7508e19ca315240f77fa8c' + assert sha1(dat_str).hexdigest() == 'dc011bb49682fb78f3cebacf965cb65cc9daba7d' @dicom_test def test_slicethickness_fallback(self): @@ -665,7 +751,7 @@ def test_data_derived_shape(self): def test_data_trace(self): # Test that a standalone trace volume is found and not dropped dw = didw.wrapper_from_file(DATA_FILE_SIEMENS_TRACE) - assert dw.image_shape == (72, 72, 39, 1) + assert dw.image_shape == (72, 72, 39) @dicom_test @needs_nibabel_data('nitest-dicom') From 
14c24ef7fc156d2a0bb760304e482cfde4694bc3 Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Wed, 24 Jul 2024 16:34:11 -0700 Subject: [PATCH 601/702] BF: Trim unneeded trailing indices from _frame_indices --- nibabel/nicom/dicomwrappers.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index c3f484a003..eab0471ec4 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -581,11 +581,10 @@ def image_shape(self): frames_per_part = 1 del_indices = {} for row_idx, row in enumerate(frame_indices.T): - if curr_parts == 1: - break unique = np.unique(row) count = len(unique) - if count == 1: + if count == 1 or curr_parts == 1: + del_indices[row_idx] = count continue # Replace slice indices with order determined from slice positions along normal if len(shape) == 2: From 019f448c9924e352ed5503aae384b59918bb1d95 Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Wed, 24 Jul 2024 17:09:09 -0700 Subject: [PATCH 602/702] BF+TST: Fix 2D plus time case Explicitly use `InStackPositionNumber` to identify the slice dim, produce correct output for 2D + time data. --- nibabel/nicom/dicomwrappers.py | 17 +++++++++++------ nibabel/nicom/tests/test_dicomwrappers.py | 9 ++++++++- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index eab0471ec4..14041e631f 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -470,10 +470,10 @@ def __init__(self, dcm_data): # Try to determine slice order and minimal image position patient self._frame_slc_ord = self._ipp = None try: - frame_ipps = [self.shared.PlanePositionSequence[0].ImagePositionPatient] + frame_ipps = [f.PlanePositionSequence[0].ImagePositionPatient for f in self.frames] except AttributeError: try: - frame_ipps = [f.PlanePositionSequence[0].ImagePositionPatient for f in self.frames] + frame_ipps = [self.shared.PlanePositionSequence[0].ImagePositionPatient] except AttributeError: frame_ipps = None if frame_ipps is not None and all(ipp is not None for ipp in frame_ipps): @@ -575,19 +575,24 @@ def image_shape(self): raise WrapperError('Missing information, cannot remove indices with confidence.') derived_dim_idx = dim_seq.index(derived_tag) frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) + dim_seq.pop(derived_dim_idx) # Determine the shape and which indices to use shape = [rows, cols] curr_parts = n_frames frames_per_part = 1 del_indices = {} + stackpos_tag = pydicom.datadict.tag_for_keyword('InStackPositionNumber') + slice_dim_idx = dim_seq.index(stackpos_tag) for row_idx, row in enumerate(frame_indices.T): unique = np.unique(row) count = len(unique) - if count == 1 or curr_parts == 1: + if curr_parts == 1 or (count == 1 and row_idx != slice_dim_idx): del_indices[row_idx] = count continue # Replace slice indices with order determined from slice positions along normal - if len(shape) == 2: + if row_idx == slice_dim_idx: + if len(shape) > 2: + raise WrapperError('Non-singular index precedes the slice index') row = self._frame_slc_ord frame_indices.T[row_idx, :] = row unique = np.unique(row) @@ -595,13 +600,13 @@ def image_shape(self): raise WrapperError("Number of slice indices and positions don't match") new_parts, leftover = divmod(curr_parts, count) allowed_val_counts = [new_parts * frames_per_part] - if len(shape) > 2: + if row_idx != slice_dim_idx: # Except for the slice dim, having a unique value for each frame is valid 
                allowed_val_counts.append(n_frames)
             if leftover != 0 or any(
                 np.count_nonzero(row == val) not in allowed_val_counts for val in unique
             ):
-                if len(shape) == 2:
+                if row_idx == slice_dim_idx:
                     raise WrapperError('Missing slices from multiframe')
                 del_indices[row_idx] = count
                 continue
diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py
index 25a58d70e5..0402421626 100755
--- a/nibabel/nicom/tests/test_dicomwrappers.py
+++ b/nibabel/nicom/tests/test_dicomwrappers.py
@@ -488,10 +488,13 @@ def __init__(self, div, sid, ipp, iop):
         assert len(ipp_seq) == num_of_frames
     # create the DimensionIndexSequence
     dim_idx_seq = [DimIdxSeqElem()] * n_indices
+    # Add entry for InStackPositionNumber to DimensionIndexSequence
+    fcs_tag = pydicom.datadict.tag_for_keyword('FrameContentSequence')
+    isp_tag = pydicom.datadict.tag_for_keyword('InStackPositionNumber')
+    dim_idx_seq[slice_dim] = DimIdxSeqElem(isp_tag, fcs_tag)
     # add an entry for StackID into the DimensionIndexSequence
     if sid_dim is not None:
         sid_tag = pydicom.datadict.tag_for_keyword('StackID')
-        fcs_tag = pydicom.datadict.tag_for_keyword('FrameContentSequence')
         dim_idx_seq[sid_dim] = DimIdxSeqElem(sid_tag, fcs_tag)
     # create the PerFrameFunctionalGroupsSequence
     frames = [
@@ -546,6 +549,10 @@ def test_shape(self):
         div_seq = ((1, 1, 2),)
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
         assert MFW(fake_mf).image_shape == (32, 64)
+        # Check 2D plus time
+        div_seq = ((1, 1, 1), (1, 1, 2), (1, 1, 3))
+        fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        assert MFW(fake_mf).image_shape == (32, 64, 1, 3)
         # Check 3D shape when StackID index is 0
         div_seq = ((1, 1), (1, 2), (1, 3), (1, 4))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))

From 0215ce5db008f32e6001335f2b4d4f39d5a0a346 Mon Sep 17 00:00:00 2001
From: Brendan Moloney
Date: Wed, 24 Jul 2024 18:33:56 -0700
Subject: [PATCH 603/702] BF+TST: Handle case with extra-spatial index that is
 unique per frame

Not sure if this ever actually happens in real multiframe data, but it
does in non-multiframe data, and I can imagine it happening if a
DimensionIndexSequence element references a per-frame AcquisitionTime.
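
For concreteness, a sketch of the index layout this handles, mirroring
the new test case (the (stack, slice, time) interpretation is
illustrative):

    # DimensionIndexValues as (stack, slice, time), with the time index
    # unique per frame rather than per volume; the frames still form
    # 3 volumes of 2 slices each:
    div_seq = [(1, 1, 1), (1, 2, 2),   # volume 1
               (1, 1, 3), (1, 2, 4),   # volume 2
               (1, 1, 5), (1, 2, 6)]   # volume 3
    # A unique-per-frame column cannot partition the frames directly, so
    # it is treated as the one 'remaining' index and its size (3) is
    # inferred from the leftover partitions, giving (rows, cols, 2, 3)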
---
 nibabel/nicom/dicomwrappers.py            | 25 ++++++++++++++++++-------
 nibabel/nicom/tests/test_dicomwrappers.py | 29 +++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 7 deletions(-)

diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py
index 14041e631f..3743878700 100755
--- a/nibabel/nicom/dicomwrappers.py
+++ b/nibabel/nicom/dicomwrappers.py
@@ -598,21 +598,32 @@ def image_shape(self):
             unique = np.unique(row)
             if len(unique) != count:
                 raise WrapperError("Number of slice indices and positions don't match")
+            elif count == n_frames:
+                if shape[-1] == 'remaining':
+                    raise WrapperError('At most one index can have ambiguous size')
+                shape.append('remaining')
+                continue
             new_parts, leftover = divmod(curr_parts, count)
-            allowed_val_counts = [new_parts * frames_per_part]
-            if row_idx != slice_dim_idx:
-                # Except for the slice dim, having a unique value for each frame is valid
-                allowed_val_counts.append(n_frames)
-            if leftover != 0 or any(
-                np.count_nonzero(row == val) not in allowed_val_counts for val in unique
-            ):
+            expected = new_parts * frames_per_part
+            if leftover != 0 or any(np.count_nonzero(row == val) != expected for val in unique):
                 if row_idx == slice_dim_idx:
                     raise WrapperError('Missing slices from multiframe')
                 del_indices[row_idx] = count
                 continue
+            if shape[-1] == 'remaining':
+                shape[-1] = new_parts
+                frames_per_part *= shape[-1]
+                new_parts = 1
             frames_per_part *= count
             shape.append(count)
             curr_parts = new_parts
+        if shape[-1] == 'remaining':
+            if curr_parts > 1:
+                shape[-1] = curr_parts
+                curr_parts = 1
+            else:
+                del_indices[len(shape)] = 1
+                shape = shape[:-1]
         if del_indices:
             if curr_parts > 1:
                 ns_failed = [k for k, v in del_indices.items() if v != 1]
diff --git
a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index b50535a4bb..2168476bb4 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -473,7 +473,6 @@ def __init__(self, div, sid, ipp, iop): frame_slc_indices = np.array(div_seq)[:, slice_dim] uniq_slc_indices = np.unique(frame_slc_indices) n_slices = len(uniq_slc_indices) - assert num_of_frames % n_slices == 0 iop_seq = [(0.0, 1.0, 0.0, 1.0, 0.0, 0.0) for _ in range(num_of_frames)] if ipp_seq is None: slc_locs = np.linspace(-1.0, 1.0, n_slices) @@ -579,6 +578,17 @@ def test_shape(self): div_seq = ((1, 1, 0), (1, 2, 0), (1, 1, 3), (1, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) assert MFW(fake_mf).image_shape == (32, 64, 2, 2) + # Check number of IPP vals match the number of slices or we raise + frames = fake_mf['PerFrameFunctionalGroupsSequence'] + for frame in frames[1:]: + frame.PlanePositionSequence = frames[0].PlanePositionSequence[:] + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape + # Check we raise on missing slices + div_seq = ((1, 1, 0), (1, 2, 0), (1, 1, 1)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # check 3D shape when there is no StackID index div_seq = ((1,), (2,), (3,), (4,)) sid_seq = (1, 1, 1, 1) @@ -614,6 +624,11 @@ def test_shape(self): div_seq = ((1, 1, 1), (2, 1, 1), (1, 1, 2), (2, 1, 2), (1, 1, 3), (2, 1, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) assert MFW(fake_mf).image_shape == (32, 64, 2, 3) + # Check non-singular dimension preceding slice dim raises + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 3)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0, slice_dim=2)) + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # Test with combo indices, here with the last two needing to be combined into # a single index corresponding to [(1, 1), (1, 1), (2, 1), (2, 1), (2, 2), (2, 2)] div_seq = ( @@ -655,6 +670,22 @@ def test_shape(self): ) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) assert MFW(fake_mf).image_shape == (32, 64, 2, 3, 2) + # Check we only allow one extra spatial dimension with unique val per frame + div_seq = ( + (1, 1, 1, 6), + (1, 2, 2, 5), + (1, 1, 3, 4), + (1, 2, 4, 3), + (1, 1, 5, 2), + (1, 2, 6, 1), + ) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape + # Check that having unique value per frame works with single volume + div_seq = ((1, 1, 1), (1, 2, 2), (1, 3, 3)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + assert MFW(fake_mf).image_shape == (32, 64, 3) def test_iop(self): # Test Image orient patient for multiframe From 52c31052e4f22ff7f0a01883129584c6091e9ac9 Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Thu, 25 Jul 2024 09:58:19 -0700 Subject: [PATCH 605/702] TST+CLN: More slice ordering testing, minor cleanup --- nibabel/nicom/tests/test_dicomwrappers.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 2168476bb4..e01759c86a 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -388,7 +388,7 @@ def fake_frames(seq_name, field_name, value_seq, frame_seq=None): class Fake: pass - if frame_seq == None: + if frame_seq is None: frame_seq = 
[Fake() for _ in range(len(value_seq))] for value, fake_frame in zip(value_seq, frame_seq): fake_element = Fake() @@ -868,6 +868,11 @@ def test_data_fake(self): sorted_data = data[..., [3, 1, 2, 0]] fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) + # Check slice sorting with negative index / IPP correlation + fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0, flip_ipp_idx_corr=True)) + sorted_data = data[..., [0, 2, 1, 3]] + fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) + assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) # 5D! dim_idxs = [ [1, 4, 2, 1], From 629dbb52e14e813203d1f9c355de95399fd70dda Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Thu, 25 Jul 2024 10:05:32 -0700 Subject: [PATCH 606/702] DOC: Add some notes to the changelog --- Changelog | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/Changelog b/Changelog index 6892951256..24e89095f3 100644 --- a/Changelog +++ b/Changelog @@ -25,6 +25,33 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. +Upcoming release (To be determined) +=================================== + +New features +------------ + +Enhancements +------------ + * Ability to read data from many multiframe DICOM files that previously generated errors + +Bug fixes +--------- + * Fixed multiframe DICOM issue where data could be flipped along slice dimension relative to the + affine + * Fixed multiframe DICOM issue where ``image_position`` and the translation component in the + ``affine`` could be incorrect + +Documentation +------------- + +Maintenance +----------- + +API changes and deprecations +---------------------------- + + 5.2.1 (Monday 26 February 2024) =============================== From fd56bf4abe195da9d351d64345381231ce7f7038 Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Mon, 12 Aug 2024 15:08:26 -0700 Subject: [PATCH 607/702] BF+ENH: Fixes to DICOM scaling, make frame filtering explicit Fixes how we handle DICOM scaling, particularly for Philips and multi-frame files. For Philips data scale factors without defined units should be avoided, and instead a private tag should be used to make image intensities comparable across series. For multi-frame DICOM, it is possible to have different scale factors (potentially coming from different tags) per-frame. We also prefer scale factors from a RealWorldValueMapping provided they have defined units. The base Wrapper class now has a few new attributes and methods to support this functionality. In particular an attribute `scale_factors` that provides an array of slope/intercept pairs, and a method `get_unscaled_data` that will return the reordered/reshaped data but without the scaling applied. A `vendor` attribute was also added to better support vendor-specific implementation details. For the MultiFrameWrapper I also added an attribute `frame_order` which exposes the order used to sort the frames, and use this to return the `scale_factors` in sorted order. While implementing this I kept bumping into issues due to the (implicit) frame filtering that was happening in the `image_shape` property, so I made this filtering explicit and configurable and moved it into the class initialization. 
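
A rough usage sketch of the new pieces (the file name and StackID value
are hypothetical):

    import pydicom
    from nibabel.nicom import dicomwrappers as didw

    dcm = pydicom.dcmread('multi_stack.dcm')
    # Keep only frames from StackID '2' instead of the default filters
    wrp = didw.wrapper_from_data(
        dcm, frame_filters=[didw.FilterMultiStack(keep_id='2')]
    )
    scaled = wrp.get_data()        # scaled, frames sorted by wrp.frame_order
    raw = wrp.get_unscaled_data()  # same shape and ordering, no scaling
    factors = wrp.scale_factors    # (N, 2) array of (slope, intercept) pairs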
--- nibabel/nicom/dicomwrappers.py | 410 +++++++++++++++++----- nibabel/nicom/tests/test_dicomwrappers.py | 363 ++++++++++++++----- nibabel/nicom/utils.py | 54 +++ 3 files changed, 636 insertions(+), 191 deletions(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 3743878700..3842248fd5 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -13,6 +13,7 @@ """ import operator +import re import warnings from functools import cached_property @@ -23,6 +24,7 @@ from ..openers import ImageOpener from . import csareader as csar from .dwiparams import B2q, nearest_pos_semi_def, q2bg +from .utils import Vendor, find_private_section, vendor_from_private pydicom = optional_package('pydicom')[0] @@ -59,7 +61,7 @@ def wrapper_from_file(file_like, *args, **kwargs): return wrapper_from_data(dcm_data) -def wrapper_from_data(dcm_data): +def wrapper_from_data(dcm_data, frame_filters=None): """Create DICOM wrapper from DICOM data object Parameters @@ -68,6 +70,9 @@ def wrapper_from_data(dcm_data): Object allowing attribute access, with DICOM attributes. Probably a dataset as read by ``pydicom``. + frame_filters + Optionally override the `frame_filters` used to create a `MultiFrameWrapper` + Returns ------- dcm_w : ``dicomwrappers.Wrapper`` or subclass @@ -76,9 +81,8 @@ def wrapper_from_data(dcm_data): sop_class = dcm_data.get('SOPClassUID') # try to detect what type of dicom object to wrap if sop_class == '1.2.840.10008.5.1.4.1.1.4.1': # Enhanced MR Image Storage - # currently only Philips is using Enhanced Multiframe DICOM - return MultiframeWrapper(dcm_data) - # Check for Siemens DICOM format types + return MultiframeWrapper(dcm_data, frame_filters) + # Check for non-enhanced (legacy) Siemens DICOM format types # Only Siemens will have data for the CSA header try: csa = csar.get_csa_header(dcm_data) @@ -103,6 +107,7 @@ class Wrapper: Methods: * get_data() + * get_unscaled_data() * get_pixel_array() * is_same_series(other) * __getitem__ : return attributes from `dcm_data` @@ -120,6 +125,8 @@ class Wrapper: * image_position : sequence length 3 * slice_indicator : float * series_signature : tuple + * scale_factors : (N, 2) array + * vendor : Vendor """ is_csa = False @@ -136,10 +143,34 @@ def __init__(self, dcm_data): dcm_data : object object should allow 'get' and '__getitem__' access. Usually this will be a ``dicom.dataset.Dataset`` object resulting from reading a - DICOM file, but a dictionary should also work. + DICOM file. 
""" self.dcm_data = dcm_data + @cached_property + def vendor(self): + """The vendor of the instrument that produced the DICOM""" + # Look at manufacturer tag first + mfgr = self.get('Manufacturer') + if mfgr: + if re.search('Siemens', mfgr, re.IGNORECASE): + return Vendor.SIEMENS + if re.search('Philips', mfgr, re.IGNORECASE): + return Vendor.PHILIPS + if re.search('GE Medical', mfgr, re.IGNORECASE): + return Vendor.GE + # Next look at UID prefixes + for uid_src in ('StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID'): + uid = str(self.get(uid_src)) + if uid.startswith(('1.3.12.2.1007.', '1.3.12.2.1107.')): + return Vendor.SIEMENS + if uid.startswith(('1.3.46', '1.3.12.2.1017')): + return Vendor.PHILIPS + if uid.startswith('1.2.840.113619'): + return Vendor.GE + # Finally look for vendor specific private blocks + return vendor_from_private(self.dcm_data) + @cached_property def image_shape(self): """The array shape as it will be returned by ``get_data()``""" @@ -315,14 +346,30 @@ def affine(self): return aff def get_pixel_array(self): - """Return unscaled pixel array from DICOM""" + """Return raw pixel array without reshaping or scaling + + Returns + ------- + data : array + array with raw pixel data from DICOM + """ data = self.dcm_data.get('pixel_array') if data is None: raise WrapperError('Cannot find data in DICOM') return data + def get_unscaled_data(self): + """Return pixel array that is potentially reshaped, but without any scaling + + Returns + ------- + data : array + array with raw pixel data from DICOM + """ + return self.get_pixel_array() + def get_data(self): - """Get scaled image data from DICOMs + """Get potentially scaled and reshaped image data from DICOMs We return the data as DICOM understands it, first dimension is rows, second dimension is columns @@ -333,7 +380,7 @@ def get_data(self): array with data as scaled from any scaling in the DICOM fields. 
""" - return self._scale_data(self.get_pixel_array()) + return self._scale_data(self.get_unscaled_data()) def is_same_series(self, other): """Return True if `other` appears to be in same series @@ -372,11 +419,86 @@ def is_same_series(self, other): return False return True + @cached_property + def scale_factors(self): + """Return (2, N) array of slope/intercept pairs""" + scaling = self._get_best_scale_factor(self.dcm_data) + if scaling is None: + if self.vendor == Vendor.PHILIPS: + warnings.warn( + 'Unable to find Philips private scale factor, cross-series comparisons may be invalid' + ) + scaling = (1, 0) + return np.array((scaling,)) + + def _get_rwv_scale_factor(self, dcm_data): + """Return the first set of 'real world' scale factors with defined units""" + rw_seq = dcm_data.get('RealWorldValueMappingSequence') + if rw_seq: + for rw_map in rw_seq: + try: + units = rw_map.MeasurementUnitsCodeSequence[0].CodeMeaning + except (AttributeError, IndexError): + continue + if units not in ('', 'no units', 'UNDEFINED'): + return ( + rw_map.get('RealWorldValueSlope', 1), + rw_map.get('RealWorldValueIntercept', 0), + ) + + def _get_legacy_scale_factor(self, dcm_data): + """Return scale factors from older 'Modality LUT' macro + + For Philips data we require RescaleType is defined and not set to 'normalized' + """ + pix_trans_seq = dcm_data.get('PixelValueTransformationSequence') + if pix_trans_seq is not None: + pix_trans = pix_trans_seq[0] + if self.vendor != Vendor.PHILIPS or pix_trans.get('RescaleType', 'US') not in ( + '', + 'US', + 'normalized', + ): + return (pix_trans.get('RescaleSlope', 1), pix_trans.get('RescaleIntercept', 0)) + if ( + dcm_data.get('RescaleSlope') is not None + or dcm_data.get('RescaleIntercept') is not None + ): + if self.vendor != Vendor.PHILIPS or dcm_data.get('RescaleType', 'US') not in ( + '', + 'US', + 'normalized', + ): + return (dcm_data.get('RescaleSlope', 1), dcm_data.get('RescaleIntercept', 0)) + + def _get_philips_scale_factor(self, dcm_data): + """Return scale factors from Philips private element + + If we don't have any other scale factors that are tied to real world units, then + this is the best scaling to use to enable cross-series comparisons + """ + offset = find_private_section(dcm_data, 0x2005, 'Philips MR Imaging DD 001') + priv_scale = None if offset is None else dcm_data.get((0x2005, offset + 0xE)) + if priv_scale is not None: + return (priv_scale.value, 0.0) + + def _get_best_scale_factor(self, dcm_data): + """Return the most appropriate scale factor found or None""" + scaling = self._get_rwv_scale_factor(dcm_data) + if scaling is not None: + return scaling + scaling = self._get_legacy_scale_factor(dcm_data) + if scaling is not None: + return scaling + if self.vendor == Vendor.PHILIPS: + scaling = self._get_philips_scale_factor(dcm_data) + if scaling is not None: + return scaling + def _scale_data(self, data): # depending on pydicom and dicom files, values might need casting from # Decimal to float - scale = float(self.get('RescaleSlope', 1)) - offset = float(self.get('RescaleIntercept', 0)) + scale, offset = self.scale_factors[0] return self._apply_scale_offset(data, scale, offset) def _apply_scale_offset(self, data, scale, offset): @@ -407,6 +529,71 @@ def b_vector(self): return q2bg(q_vec)[1] +class FrameFilter: + """Base class for defining how to filter out (ignore) frames from a multiframe file + + It is guaranteed that the `applies` method will on a dataset before the `keep` method + is called on any of the frames inside. 
+    """
+
+    def applies(self, dcm_wrp) -> bool:
+        """Returns true if the filter should be applied to a dataset"""
+        return True
+
+    def keep(self, frame_data) -> bool:
+        """Return true if the frame should be kept"""
+        raise NotImplementedError
+
+
+class FilterMultiStack(FrameFilter):
+    """Filter out all but one `StackID`"""
+
+    def __init__(self, keep_id=None):
+        self._keep_id = keep_id
+
+    def applies(self, dcm_wrp) -> bool:
+        first_fcs = dcm_wrp.frames[0].get('FrameContentSequence', (None,))[0]
+        if first_fcs is None or not hasattr(first_fcs, 'StackID'):
+            return False
+        stack_ids = {frame.FrameContentSequence[0].StackID for frame in dcm_wrp.frames}
+        if self._keep_id is not None:
+            if self._keep_id not in stack_ids:
+                raise WrapperError('Explicitly requested StackID not found')
+            self._selected = self._keep_id
+        if len(stack_ids) > 1:
+            if self._keep_id is None:
+                warnings.warn(
+                    'A multi-stack file was passed without an explicit filter, just using lowest StackID'
+                )
+                self._selected = sorted(stack_ids)[0]
+            return True
+        return False
+
+    def keep(self, frame) -> bool:
+        return frame.FrameContentSequence[0].StackID == self._selected
+
+
+class FilterDwiIso(FrameFilter):
+    """Filter out derived ISOTROPIC frames from DWI series"""
+
+    def applies(self, dcm_wrp) -> bool:
+        if not hasattr(dcm_wrp.frames[0], 'MRDiffusionSequence'):
+            return False
+        diff_dirs = {
+            f.MRDiffusionSequence[0].get('DiffusionDirectionality') for f in dcm_wrp.frames
+        }
+        if len(diff_dirs) > 1 and 'ISOTROPIC' in diff_dirs:
+            warnings.warn('Derived images found and removed')
+            return True
+        return False
+
+    def keep(self, frame) -> bool:
+        return frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC'
+
+
+DEFAULT_FRAME_FILTERS = (FilterMultiStack(), FilterDwiIso())
+
+
 class MultiframeWrapper(Wrapper):
     """Wrapper for Enhanced MR Storage SOP Class
@@ -436,17 +623,20 @@ class MultiframeWrapper(Wrapper):

     Methods
     -------
+    vendor(self)
+    frame_order(self)
     image_shape(self)
     image_orient_patient(self)
     voxel_sizes(self)
     image_position(self)
     series_signature(self)
+    scale_factors(self)
     get_data(self)
     """

     is_multiframe = True

-    def __init__(self, dcm_data):
+    def __init__(self, dcm_data, frame_filters=None):
         """Initializes MultiframeWrapper

         Parameters
         ----------
         dcm_data : object
             object should allow 'get' and '__getitem__' access. Usually this
             will be a ``dicom.dataset.Dataset`` object resulting from reading a
-            DICOM file, but a dictionary should also work.
+            DICOM file.
+
+        frame_filters : Iterable of FrameFilter
+            defines which frames inside the dataset should be ignored. If None then
+            `dicomwrappers.DEFAULT_FRAME_FILTERS` will be used.
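+
+            As a brief illustration, ``frame_filters=[]`` disables filtering
+            entirely, while ``frame_filters=[FilterMultiStack('2')]`` (StackID
+            value hypothetical) explicitly selects one stack of a multi-stack
+            file.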
""" Wrapper.__init__(self, dcm_data) - self.dcm_data = dcm_data self.frames = dcm_data.get('PerFrameFunctionalGroupsSequence') try: self.frames[0] @@ -467,8 +660,19 @@ def __init__(self, dcm_data): self.shared = dcm_data.get('SharedFunctionalGroupsSequence')[0] except TypeError: raise WrapperError('SharedFunctionalGroupsSequence is empty.') + # Apply frame filters one at a time in the order provided + if frame_filters is None: + frame_filters = DEFUALT_FRAME_FILTERS + frame_filters = [filt for filt in frame_filters if filt.applies(self)] + for filt in frame_filters: + self.frames = [f for f in self.frames if filt.keep(f)] + # Make sure there is only one StackID remaining + first_fcs = self.frames[0].get('FrameContentSequence', (None,))[0] + if first_fcs is not None and hasattr(first_fcs, 'StackID'): + if len({frame.FrameContentSequence[0].StackID for frame in self.frames}) > 1: + raise WrapperError('More than one StackID remains after filtering') # Try to determine slice order and minimal image position patient - self._frame_slc_ord = self._ipp = None + self._frame_slc_ord = self._ipp = self._slice_spacing = None try: frame_ipps = [f.PlanePositionSequence[0].ImagePositionPatient for f in self.frames] except AttributeError: @@ -485,8 +689,29 @@ def __init__(self, dcm_data): val: order for val, order in zip(uniq_slc_pos, np.argsort(uniq_slc_pos)) } self._frame_slc_ord = [pos_ord_map[pos] for pos in rnd_slc_pos] + if len(self._frame_slc_ord) > 1: + self._slice_spacing = ( + frame_slc_pos[self._frame_slc_ord[1]] - frame_slc_pos[self._frame_slc_ord[0]] + ) self._ipp = frame_ipps[np.argmin(frame_slc_pos)] - self._shape = None + self._frame_indices = None + + @cached_property + def vendor(self): + """The vendor of the instrument that produced the DICOM""" + vendor = super().vendor + if vendor is not None: + return vendor + vendor = vendor_from_private(self.shared) + if vendor is not None: + return vendor + return vendor_from_private(self.frames[0]) + + @cached_property + def frame_order(self): + if self._frame_indices is None: + _ = self.image_shape + return np.lexsort(self._frame_indices.T) @cached_property def image_shape(self): @@ -519,68 +744,20 @@ def image_shape(self): rows, cols = self.get('Rows'), self.get('Columns') if None in (rows, cols): raise WrapperError('Rows and/or Columns are empty.') - - # Check number of frames - first_frame = self.frames[0] - n_frames = self.get('NumberOfFrames') - # some Philips may have derived images appended - has_derived = False - if hasattr(first_frame, 'get') and first_frame.get([0x18, 0x9117]): - # DWI image may include derived isotropic, ADC or trace volume - try: - aniso_frames = pydicom.Sequence() - aniso_slc_ord = [] - for slc_ord, frame in zip(self._frame_slc_ord, self.frames): - if frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC': - aniso_frames.append(frame) - aniso_slc_ord.append(slc_ord) - # Image contains DWI volumes followed by derived images; remove derived images - if len(aniso_frames) != 0: - self.frames = aniso_frames - self._frame_slc_ord = aniso_slc_ord - except IndexError: - # Sequence tag is found but missing items! 
- raise WrapperError('Diffusion file missing information') - except AttributeError: - # DiffusionDirectionality tag is not required - pass - else: - if n_frames != len(self.frames): - warnings.warn('Derived images found and removed') - n_frames = len(self.frames) - has_derived = True - - assert len(self.frames) == n_frames - frame_indices = np.array( - [frame.FrameContentSequence[0].DimensionIndexValues for frame in self.frames] - ) - # Check that there is only one multiframe stack index - stack_ids = {frame.FrameContentSequence[0].StackID for frame in self.frames} - if len(stack_ids) > 1: - raise WrapperError( - 'File contains more than one StackID. Cannot handle multi-stack files' + # Check number of frames, initialize array of frame indices + n_frames = len(self.frames) + try: + frame_indices = np.array( + [frame.FrameContentSequence[0].DimensionIndexValues for frame in self.frames] ) - # Determine if one of the dimension indices refers to the stack id - dim_seq = [dim.DimensionIndexPointer for dim in self.get('DimensionIndexSequence')] - stackid_tag = pydicom.datadict.tag_for_keyword('StackID') - # remove the stack id axis if present - if stackid_tag in dim_seq: - stackid_dim_idx = dim_seq.index(stackid_tag) - frame_indices = np.delete(frame_indices, stackid_dim_idx, axis=1) - dim_seq.pop(stackid_dim_idx) - if has_derived: - # derived volume is included - derived_tag = pydicom.datadict.tag_for_keyword('DiffusionBValue') - if derived_tag not in dim_seq: - raise WrapperError('Missing information, cannot remove indices with confidence.') - derived_dim_idx = dim_seq.index(derived_tag) - frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) - dim_seq.pop(derived_dim_idx) + except AttributeError: + raise WrapperError("Can't find frame 'DimensionIndexValues'") # Determine the shape and which indices to use shape = [rows, cols] curr_parts = n_frames frames_per_part = 1 del_indices = {} + dim_seq = [dim.DimensionIndexPointer for dim in self.get('DimensionIndexSequence')] stackpos_tag = pydicom.datadict.tag_for_keyword('InStackPositionNumber') slice_dim_idx = dim_seq.index(stackpos_tag) for row_idx, row in enumerate(frame_indices.T): @@ -684,12 +861,15 @@ def voxel_sizes(self): except AttributeError: raise WrapperError('Not enough data for pixel spacing') pix_space = pix_measures.PixelSpacing - try: - zs = pix_measures.SliceThickness - except AttributeError: - zs = self.get('SpacingBetweenSlices') - if zs is None: - raise WrapperError('Not enough data for slice thickness') + if self._slice_spacing is not None: + zs = self._slice_spacing + else: + try: + zs = pix_measures.SliceThickness + except AttributeError: + zs = self.get('SpacingBetweenSlices') + if zs is None: + raise WrapperError('Not enough data for slice thickness') # Ensure values are float rather than Decimal return tuple(map(float, list(pix_space) + [zs])) @@ -710,27 +890,63 @@ def series_signature(self): signature['vox'] = (self.voxel_sizes, none_or_close) return signature - def get_data(self): + @cached_property + def scale_factors(self): + """Return `(2, N)` array of slope/intercept pairs + + If there is a single global scale factor then `N` will be one, otherwise it will + be the number of frames + """ + # Look for shared / global RWV scale factor first + shared_scale = self._get_rwv_scale_factor(self.shared) + if shared_scale is not None: + return np.array([shared_scale]) + shared_scale = self._get_rwv_scale_factor(self.dcm_data) + if shared_scale is not None: + return np.array([shared_scale]) + # Try pulling out 
best scale factors from each individual frame + frame_scales = [self._get_best_scale_factor(f) for f in self.frames] + if any(s is not None for s in frame_scales): + if any(s is None for s in frame_scales): + if self.vendor == Vendor.PHILIPS: + warnings.warn( + 'Unable to find Philips private scale factor, cross-series comparisons may be invalid' + ) + frame_scales = [s if s is not None else (1, 0) for s in frame_scales] + if all(s == frame_scales[0] for s in frame_scales[1:]): + return np.array([frame_scales[0]]) + return np.array(frame_scales)[self.frame_order] + # Finally look for shared non-RWV scale factors + shared_scale = self._get_best_scale_factor(self.shared) + if shared_scale is not None: + return np.array([shared_scale]) + shared_scale = self._get_best_scale_factor(self.dcm_data) + if shared_scale is None: + if self.vendor == Vendor.PHILIPS: + warnings.warn( + 'Unable to find Philips private scale factor, cross-series comparisons may be invalid' + ) + shared_scale = (1, 0) + return np.array([shared_scale]) + + def get_unscaled_data(self): shape = self.image_shape if shape is None: raise WrapperError('No valid information for image shape') data = self.get_pixel_array() - # Roll frames axis to last + # Roll frames axis to last and reorder if len(data.shape) > 2: - data = data.transpose((1, 2, 0)) - # Sort frames with first index changing fastest, last slowest - sorted_indices = np.lexsort(self._frame_indices.T) - data = data[..., sorted_indices] - data = data.reshape(shape, order='F') - return self._scale_data(data) + data = data.transpose((1, 2, 0))[..., self.frame_order] + return data.reshape(shape, order='F') def _scale_data(self, data): - pix_trans = getattr(self.frames[0], 'PixelValueTransformationSequence', None) - if pix_trans is None: - return super()._scale_data(data) - scale = float(pix_trans[0].RescaleSlope) - offset = float(pix_trans[0].RescaleIntercept) - return self._apply_scale_offset(data, scale, offset) + scale_factors = self.scale_factors + if scale_factors.shape[0] == 1: + scale, offset = scale_factors[0] + return self._apply_scale_offset(data, scale, offset) + orig_shape = data.shape + data = data.reshape(data.shape[:2] + (len(self.frames),)) + return (data * scale_factors[:, 0] + scale_factors[:, 1]).reshape(orig_shape) class SiemensWrapper(Wrapper): @@ -757,7 +973,7 @@ def __init__(self, dcm_data, csa_header=None): object should allow 'get' and '__getitem__' access. If `csa_header` is None, it should also be possible to extract a CSA header from `dcm_data`. Usually this will be a ``dicom.dataset.Dataset`` object - resulting from reading a DICOM file. A dict should also work. + resulting from reading a DICOM file. csa_header : None or mapping, optional mapping giving values for Siemens CSA image sub-header. If None, we try and read the CSA information from `dcm_data`. 
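A note on the per-frame branch of `_scale_data` above: it relies on NumPy broadcasting, where a `(rows, cols, n_frames)` block is multiplied against the length-`n_frames` slope column and offset by the matching intercept column. A standalone sketch of that arithmetic with made-up values (nothing below comes from a real DICOM):

    import numpy as np

    # 2x3 pixels across 4 frames, Fortran order as produced by get_unscaled_data()
    data = np.arange(24).reshape((2, 3, 4), order='F')
    # One (slope, intercept) pair per frame, shape (4, 2)
    scale_factors = np.array([[2.0, -1.0], [2.0, -1.0], [3.0, 0.0], [3.0, 0.0]])
    # The trailing axis of `data` lines up with the length-4 slope/intercept columns
    scaled = data * scale_factors[:, 0] + scale_factors[:, 1]
    assert scaled.shape == (2, 3, 4)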
@@ -773,6 +989,11 @@ def __init__(self, dcm_data, csa_header=None):
             csa_header = {}
         self.csa_header = csa_header

+    @cached_property
+    def vendor(self):
+        """The vendor of the instrument that produced the DICOM"""
+        return Vendor.SIEMENS
+
     @cached_property
     def slice_normal(self):
         # The std_slice_normal comes from the cross product of the directions
@@ -964,7 +1185,7 @@ def image_position(self):
             Q = np.fliplr(iop) * pix_spacing
             return ipp + np.dot(Q, vox_trans_fixes[:, None]).ravel()

-    def get_data(self):
+    def get_unscaled_data(self):
         """Get scaled image data from DICOMs

         Resorts data block from mosaic to 3D
@@ -1007,8 +1228,7 @@ def get_data(self):
         # pool mosaic-generated dims
         v3 = v4.reshape((n_slice_rows, n_slice_cols, n_blocks))
         # delete any padding slices
-        v3 = v3[..., :n_mosaic]
-        return self._scale_data(v3)
+        return v3[..., :n_mosaic]


 def none_or_close(val1, val2, rtol=1e-5, atol=1e-6):
diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py
index e01759c86a..0556fc63cc 100755
--- a/nibabel/nicom/tests/test_dicomwrappers.py
+++ b/nibabel/nicom/tests/test_dicomwrappers.py
@@ -1,7 +1,7 @@
 """Testing DICOM wrappers"""

 import gzip
-from copy import copy
+from copy import deepcopy
 from decimal import Decimal
 from hashlib import sha1
 from os.path import dirname
@@ -11,6 +11,7 @@
 import numpy as np
 import pytest
 from numpy.testing import assert_array_almost_equal, assert_array_equal
+from pydicom.dataset import Dataset

 from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data
 from ...volumeutils import endian_codes
@@ -63,8 +64,8 @@ def test_wrappers():
     # test direct wrapper calls
     # first with empty or minimal data
     multi_minimal = {
-        'PerFrameFunctionalGroupsSequence': [None],
-        'SharedFunctionalGroupsSequence': [None],
+        'PerFrameFunctionalGroupsSequence': [Dataset()],
+        'SharedFunctionalGroupsSequence': [Dataset()],
     }
     for maker, args in (
         (didw.Wrapper, ({},)),
@@ -163,10 +164,10 @@ def test_wrapper_from_data():
     fake_data['SOPClassUID'] = '1.2.840.10008.5.1.4.1.1.4.1'
     with pytest.raises(didw.WrapperError):
         didw.wrapper_from_data(fake_data)
-    fake_data['PerFrameFunctionalGroupsSequence'] = [None]
+    fake_data['PerFrameFunctionalGroupsSequence'] = [Dataset()]
     with pytest.raises(didw.WrapperError):
         didw.wrapper_from_data(fake_data)
-    fake_data['SharedFunctionalGroupsSequence'] = [None]
+    fake_data['SharedFunctionalGroupsSequence'] = [Dataset()]
     # minimal set should now be met
     dw = didw.wrapper_from_data(fake_data)
     assert dw.is_multiframe
@@ -384,16 +385,17 @@ def fake_frames(seq_name, field_name, value_seq, frame_seq=None):
         each element in list is obj.<seq_name>[0].<field_name>
= value_seq[n] for n in range(N) """ - - class Fake: - pass - if frame_seq is None: - frame_seq = [Fake() for _ in range(len(value_seq))] + frame_seq = [Dataset() for _ in range(len(value_seq))] for value, fake_frame in zip(value_seq, frame_seq): - fake_element = Fake() + if value is None: + continue + if hasattr(fake_frame, seq_name): + fake_element = getattr(fake_frame, seq_name)[0] + else: + fake_element = Dataset() + setattr(fake_frame, seq_name, [fake_element]) setattr(fake_element, field_name, value) - setattr(fake_frame, seq_name, [fake_element]) return frame_seq @@ -434,27 +436,32 @@ def __repr__(self): attr_strs.append(f'{attr}={getattr(self, attr)}') return f"{self.__class__.__name__}({', '.join(attr_strs)})" - class DimIdxSeqElem(PrintBase): + class DimIdxSeqElem(Dataset): def __init__(self, dip=(0, 0), fgp=None): + super().__init__() self.DimensionIndexPointer = dip if fgp is not None: self.FunctionalGroupPointer = fgp - class FrmContSeqElem(PrintBase): + class FrmContSeqElem(Dataset): def __init__(self, div, sid): + super().__init__() self.DimensionIndexValues = div self.StackID = sid - class PlnPosSeqElem(PrintBase): + class PlnPosSeqElem(Dataset): def __init__(self, ipp): + super().__init__() self.ImagePositionPatient = ipp - class PlnOrientSeqElem(PrintBase): + class PlnOrientSeqElem(Dataset): def __init__(self, iop): + super().__init__() self.ImageOrientationPatient = iop - class PerFrmFuncGrpSeqElem(PrintBase): + class PerFrmFuncGrpSeqElem(Dataset): def __init__(self, div, sid, ipp, iop): + super().__init__() self.FrameContentSequence = [FrmContSeqElem(div, sid)] self.PlanePositionSequence = [PlnPosSeqElem(ipp)] self.PlaneOrientationSequence = [PlnOrientSeqElem(iop)] @@ -473,7 +480,7 @@ def __init__(self, div, sid, ipp, iop): frame_slc_indices = np.array(div_seq)[:, slice_dim] uniq_slc_indices = np.unique(frame_slc_indices) n_slices = len(uniq_slc_indices) - iop_seq = [(0.0, 1.0, 0.0, 1.0, 0.0, 0.0) for _ in range(num_of_frames)] + iop_seq = [[0.0, 1.0, 0.0, 1.0, 0.0, 0.0] for _ in range(num_of_frames)] if ipp_seq is None: slc_locs = np.linspace(-1.0, 1.0, n_slices) if flip_ipp_idx_corr: @@ -481,7 +488,7 @@ def __init__(self, div, sid, ipp, iop): slc_idx_loc = { div_idx: slc_locs[arr_idx] for arr_idx, div_idx in enumerate(np.sort(uniq_slc_indices)) } - ipp_seq = [(-1.0, -1.0, slc_idx_loc[idx]) for idx in frame_slc_indices] + ipp_seq = [[-1.0, -1.0, slc_idx_loc[idx]] for idx in frame_slc_indices] else: assert flip_ipp_idx_corr is False # caller can flip it themselves assert len(ipp_seq) == num_of_frames @@ -507,38 +514,37 @@ def __init__(self, div, sid, ipp, iop): } +class FakeDataset(Dataset): + pixel_array = None + + class TestMultiFrameWrapper(TestCase): # Test MultiframeWrapper - MINIMAL_MF = { - # Minimal contents of dcm_data for this wrapper - 'PerFrameFunctionalGroupsSequence': [None], - 'SharedFunctionalGroupsSequence': [None], - } + # Minimal contents of dcm_data for this wrapper + MINIMAL_MF = FakeDataset() + MINIMAL_MF.PerFrameFunctionalGroupsSequence = [Dataset()] + MINIMAL_MF.SharedFunctionalGroupsSequence = [Dataset()] WRAPCLASS = didw.MultiframeWrapper @dicom_test def test_shape(self): # Check the shape algorithm - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) # No rows, cols, raise WrapperError with pytest.raises(didw.WrapperError): dw.image_shape - fake_mf['Rows'] = 64 + fake_mf.Rows = 64 with pytest.raises(didw.WrapperError): dw.image_shape fake_mf.pop('Rows') - fake_mf['Columns'] = 64 + 
fake_mf.Columns = 64
         with pytest.raises(didw.WrapperError):
             dw.image_shape
-        fake_mf['Rows'] = 32
-        # Missing frame data, raise AssertionError
-        with pytest.raises(AssertionError):
-            dw.image_shape
-        fake_mf['NumberOfFrames'] = 4
-        # PerFrameFunctionalGroupsSequence does not match NumberOfFrames
-        with pytest.raises(AssertionError):
+        fake_mf.Rows = 32
+        # No frame data raises WrapperError
+        with pytest.raises(didw.WrapperError):
             dw.image_shape
         # check 2D shape with StackID index is 0
         div_seq = ((1, 1),)
@@ -556,11 +562,32 @@ def test_shape(self):
         div_seq = ((1, 1), (1, 2), (1, 3), (1, 4))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
         assert MFW(fake_mf).image_shape == (32, 64, 4)
-        # Check stack number matching when StackID index is 0
+        # Check for warning when implicitly dropping stacks
         div_seq = ((1, 1), (1, 2), (1, 3), (2, 4))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        with pytest.warns(
+            UserWarning,
+            match='A multi-stack file was passed without an explicit filter, just using lowest StackID',
+        ):
+            assert MFW(fake_mf).image_shape == (32, 64, 3)
+        # No warning if we explicitly select that StackID to keep
+        assert MFW(fake_mf, frame_filters=(didw.FilterMultiStack(1),)).image_shape == (32, 64, 3)
+        assert MFW(fake_mf, frame_filters=(didw.FilterMultiStack(2),)).image_shape == (32, 64)
+        # Stack filtering is the same when StackID is not an index
+        div_seq = ((1,), (2,), (3,), (4,))
+        sid_seq = (1, 1, 1, 2)
+        fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq))
+        with pytest.warns(
+            UserWarning,
+            match='A multi-stack file was passed without an explicit filter, just using lowest StackID',
+        ):
+            assert MFW(fake_mf).image_shape == (32, 64, 3)
+        # No warning if we explicitly select that StackID to keep
+        assert MFW(fake_mf, frame_filters=(didw.FilterMultiStack(1),)).image_shape == (32, 64, 3)
+        assert MFW(fake_mf, frame_filters=(didw.FilterMultiStack(2),)).image_shape == (32, 64)
+        # Check for error when explicitly requested StackID is missing
         with pytest.raises(didw.WrapperError):
-            MFW(fake_mf).image_shape
+            MFW(fake_mf, frame_filters=(didw.FilterMultiStack(3),))
         # Make some fake frame data for 4D when StackID index is 0
         div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 3))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
@@ -568,8 +595,12 @@ def test_shape(self):
         # Check stack number matching for 4D when StackID index is 0
         div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (2, 2, 3))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
-        with pytest.raises(didw.WrapperError):
-            MFW(fake_mf).image_shape
+        with pytest.warns(
+            UserWarning,
+            match='A multi-stack file was passed without an explicit filter, just using lowest StackID',
+        ):
+            with pytest.raises(didw.WrapperError):
+                MFW(fake_mf).image_shape
         # Check indices can be non-contiguous when StackID index is 0
         div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 3), (1, 2, 3))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
@@ -579,7 +610,7 @@ def test_shape(self):
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
         assert MFW(fake_mf).image_shape == (32, 64, 2, 2)
         # Check number of IPP vals match the number of slices or we raise
-        frames = fake_mf['PerFrameFunctionalGroupsSequence']
+        frames = fake_mf.PerFrameFunctionalGroupsSequence
         for frame in frames[1:]:
             frame.PlanePositionSequence = frames[0].PlanePositionSequence[:]
         with pytest.raises(didw.WrapperError):
@@ -594,12 +625,6 @@ def test_shape(self):
         sid_seq = (1, 1, 1, 1)
fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq)) assert MFW(fake_mf).image_shape == (32, 64, 4) - # check 3D stack number matching when there is no StackID index - div_seq = ((1,), (2,), (3,), (4,)) - sid_seq = (1, 1, 1, 2) - fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq)) - with pytest.raises(didw.WrapperError): - MFW(fake_mf).image_shape # check 4D shape when there is no StackID index div_seq = ((1, 1), (2, 1), (1, 2), (2, 2), (1, 3), (2, 3)) sid_seq = (1, 1, 1, 1, 1, 1) @@ -609,8 +634,12 @@ def test_shape(self): div_seq = ((1, 1), (2, 1), (1, 2), (2, 2), (1, 3), (2, 3)) sid_seq = (1, 1, 1, 1, 1, 2) fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq)) - with pytest.raises(didw.WrapperError): - MFW(fake_mf).image_shape + with pytest.warns( + UserWarning, + match='A multi-stack file was passed without an explicit filter, just using lowest StackID', + ): + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # check 3D shape when StackID index is 1 div_seq = ((1, 1), (2, 1), (3, 1), (4, 1)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) @@ -618,8 +647,11 @@ def test_shape(self): # Check stack number matching when StackID index is 1 div_seq = ((1, 1), (2, 1), (3, 2), (4, 1)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) - with pytest.raises(didw.WrapperError): - MFW(fake_mf).image_shape + with pytest.warns( + UserWarning, + match='A multi-stack file was passed without an explicit filter, just using lowest StackID', + ): + assert MFW(fake_mf).image_shape == (32, 64, 3) # Make some fake frame data for 4D when StackID index is 1 div_seq = ((1, 1, 1), (2, 1, 1), (1, 1, 2), (2, 1, 2), (1, 1, 3), (2, 1, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) @@ -689,7 +721,7 @@ def test_shape(self): def test_iop(self): # Test Image orient patient for multiframe - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) with pytest.raises(didw.WrapperError): @@ -698,56 +730,56 @@ def test_iop(self): fake_frame = fake_frames( 'PlaneOrientationSequence', 'ImageOrientationPatient', [[0, 1, 0, 1, 0, 0]] )[0] - fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] + fake_mf.SharedFunctionalGroupsSequence = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) - fake_mf['SharedFunctionalGroupsSequence'] = [None] + fake_mf.SharedFunctionalGroupsSequence = [Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_orient_patient - fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] + fake_mf.PerFrameFunctionalGroupsSequence = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) def test_voxel_sizes(self): # Test voxel size calculation - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) with pytest.raises(didw.WrapperError): dw.voxel_sizes # Make a fake frame fake_frame = fake_frames('PixelMeasuresSequence', 'PixelSpacing', [[2.1, 3.2]])[0] - fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] + fake_mf.SharedFunctionalGroupsSequence = [fake_frame] # Still not enough, we lack information for slice distances with pytest.raises(didw.WrapperError): MFW(fake_mf).voxel_sizes # This can come from SpacingBetweenSlices or frame SliceThickness - fake_mf['SpacingBetweenSlices'] = 4.3 + fake_mf.SpacingBetweenSlices = 4.3 assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 4.3]) # If both, prefer SliceThickness 
fake_frame.PixelMeasuresSequence[0].SliceThickness = 5.4 assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Just SliceThickness is OK - del fake_mf['SpacingBetweenSlices'] + del fake_mf.SpacingBetweenSlices assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Removing shared leads to error again - fake_mf['SharedFunctionalGroupsSequence'] = [None] + fake_mf.SharedFunctionalGroupsSequence = [Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).voxel_sizes # Restoring to frames makes it work again - fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] + fake_mf.PerFrameFunctionalGroupsSequence = [fake_frame] assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Decimals in any field are OK fake_frame = fake_frames( 'PixelMeasuresSequence', 'PixelSpacing', [[Decimal('2.1'), Decimal('3.2')]] )[0] - fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] - fake_mf['SpacingBetweenSlices'] = Decimal('4.3') + fake_mf.SharedFunctionalGroupsSequence = [fake_frame] + fake_mf.SpacingBetweenSlices = Decimal('4.3') assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 4.3]) fake_frame.PixelMeasuresSequence[0].SliceThickness = Decimal('5.4') assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) def test_image_position(self): # Test image_position property for multiframe - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) with pytest.raises(didw.WrapperError): @@ -758,12 +790,12 @@ def test_image_position(self): frames = fake_frames( 'PlanePositionSequence', 'ImagePositionPatient', [[-2.0, 3.0, 7]], frames ) - fake_mf['SharedFunctionalGroupsSequence'] = frames + fake_mf.SharedFunctionalGroupsSequence = frames assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) - fake_mf['SharedFunctionalGroupsSequence'] = [None] + fake_mf.SharedFunctionalGroupsSequence = [Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_position - fake_mf['PerFrameFunctionalGroupsSequence'] = frames + fake_mf.PerFrameFunctionalGroupsSequence = frames assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) # Check lists of Decimals work frames[0].PlanePositionSequence[0].ImagePositionPatient = [ @@ -775,7 +807,7 @@ def test_image_position(self): frames = fake_frames('PlaneOrientationSequence', 'ImageOrientationPatient', [iop] * 2) ipps = [[-2.0, 3.0, 7], [-2.0, 3.0, 6]] frames = fake_frames('PlanePositionSequence', 'ImagePositionPatient', ipps, frames) - fake_mf['PerFrameFunctionalGroupsSequence'] = frames + fake_mf.PerFrameFunctionalGroupsSequence = frames assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 6]) @dicom_test @@ -809,9 +841,9 @@ def test_slicethickness_fallback(self): def test_data_derived_shape(self): # Test 4D diffusion data with an additional trace volume included # Excludes the trace volume and generates the correct shape - dw = didw.wrapper_from_file(DATA_FILE_4D_DERIVED) with pytest.warns(UserWarning, match='Derived images found and removed'): - assert dw.image_shape == (96, 96, 60, 33) + dw = didw.wrapper_from_file(DATA_FILE_4D_DERIVED) + assert dw.image_shape == (96, 96, 60, 33) @dicom_test @needs_nibabel_data('dcm_qa_xa30') @@ -831,7 +863,7 @@ def test_data_unreadable_private_headers(self): @dicom_test def test_data_fake(self): # Test algorithm for get_data - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) # Fails - no shape @@ -843,8 +875,8 @@ def test_data_fake(self): with 
pytest.raises(didw.WrapperError): dw.get_data() # Make shape and indices - fake_mf['Rows'] = 2 - fake_mf['Columns'] = 3 + fake_mf.Rows = 2 + fake_mf.Columns = 3 dim_idxs = ((1, 1), (1, 2), (1, 3), (1, 4)) fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0)) assert MFW(fake_mf).image_shape == (2, 3, 4) @@ -854,24 +886,24 @@ def test_data_fake(self): # Add data - 3D data = np.arange(24).reshape((2, 3, 4)) # Frames dim is first for some reason - fake_mf['pixel_array'] = np.rollaxis(data, 2) + object.__setattr__(fake_mf, 'pixel_array', np.rollaxis(data, 2)) # Now it should work dw = MFW(fake_mf) assert_array_equal(dw.get_data(), data) # Test scaling works - fake_mf['RescaleSlope'] = 2.0 - fake_mf['RescaleIntercept'] = -1 + fake_mf.RescaleSlope = 2.0 + fake_mf.RescaleIntercept = -1 assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) # Check slice sorting dim_idxs = ((1, 4), (1, 2), (1, 3), (1, 1)) fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0)) sorted_data = data[..., [3, 1, 2, 0]] - fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) + fake_mf.pixel_array = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) # Check slice sorting with negative index / IPP correlation fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0, flip_ipp_idx_corr=True)) sorted_data = data[..., [0, 2, 1, 3]] - fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) + fake_mf.pixel_array = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) # 5D! dim_idxs = [ @@ -898,28 +930,167 @@ def test_data_fake(self): sorted_data = data.reshape(shape[:2] + (-1,), order='F') order = [11, 9, 10, 8, 3, 1, 2, 0, 15, 13, 14, 12, 7, 5, 6, 4] sorted_data = sorted_data[..., np.argsort(order)] - fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) + fake_mf.pixel_array = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) - def test__scale_data(self): + def test_scale_data(self): # Test data scaling - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) + fake_mf.Rows = 2 + fake_mf.Columns = 3 + fake_mf.PerFrameFunctionalGroupsSequence = [Dataset() for _ in range(4)] MFW = self.WRAPCLASS - dw = MFW(fake_mf) - data = np.arange(24).reshape((2, 3, 4)) - assert_array_equal(data, dw._scale_data(data)) - fake_mf['RescaleSlope'] = 2.0 - fake_mf['RescaleIntercept'] = -1.0 - assert_array_equal(data * 2 - 1, dw._scale_data(data)) - fake_frame = fake_frames('PixelValueTransformationSequence', 'RescaleSlope', [3.0])[0] - fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] - # Lacking RescaleIntercept -> Error - dw = MFW(fake_mf) - with pytest.raises(AttributeError): - dw._scale_data(data) - fake_frame.PixelValueTransformationSequence[0].RescaleIntercept = -2 - assert_array_equal(data * 3 - 2, dw._scale_data(data)) + data = np.arange(24).reshape((2, 3, 4), order='F') + assert_array_equal(data, MFW(fake_mf)._scale_data(data)) + # Test legacy top-level slope/intercept + fake_mf.RescaleSlope = 2.0 + fake_mf.RescaleIntercept = -1.0 + assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) + # RealWorldValueMapping takes precedence, but only with defined units + fake_mf.RealWorldValueMappingSequence = [Dataset()] + fake_mf.RealWorldValueMappingSequence[0].RealWorldValueSlope = 10.0 + fake_mf.RealWorldValueMappingSequence[0].RealWorldValueIntercept = -5.0 + assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) + fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence = 
[Dataset()] + fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[0].CodeMeaning = '%' + assert_array_equal(data * 10 - 5, MFW(fake_mf)._scale_data(data)) + fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[ + 0 + ].CodeMeaning = 'no units' + assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) + # Possible to have more than one RealWorldValueMapping, use first one with defined units + fake_mf.RealWorldValueMappingSequence.append(Dataset()) + fake_mf.RealWorldValueMappingSequence[-1].RealWorldValueSlope = 15.0 + fake_mf.RealWorldValueMappingSequence[-1].RealWorldValueIntercept = -3.0 + fake_mf.RealWorldValueMappingSequence[-1].MeasurementUnitsCodeSequence = [Dataset()] + fake_mf.RealWorldValueMappingSequence[-1].MeasurementUnitsCodeSequence[0].CodeMeaning = '%' + assert_array_equal(data * 15 - 3, MFW(fake_mf)._scale_data(data)) + # A global RWV scale takes precedence over per-frame PixelValueTransformation + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + frames = fake_frames( + 'PixelValueTransformationSequence', + 'RescaleSlope', + [3.0, 3.0, 3.0, 3.0], + fake_mf.PerFrameFunctionalGroupsSequence, + ) + assert_array_equal(data * 15 - 3, MFW(fake_mf)._scale_data(data)) + # The per-frame PixelValueTransformation takes precedence over plain top-level slope / inter + delattr(fake_mf, 'RealWorldValueMappingSequence') + assert_array_equal(data * 3, MFW(fake_mf)._scale_data(data)) + for frame in frames: + frame.PixelValueTransformationSequence[0].RescaleIntercept = -2 + assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data)) # Decimals are OK - fake_frame.PixelValueTransformationSequence[0].RescaleSlope = Decimal('3') - fake_frame.PixelValueTransformationSequence[0].RescaleIntercept = Decimal('-2') - assert_array_equal(data * 3 - 2, dw._scale_data(data)) + for frame in frames: + frame.PixelValueTransformationSequence[0].RescaleSlope = Decimal('3') + frame.PixelValueTransformationSequence[0].RescaleIntercept = Decimal('-2') + assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data)) + # A per-frame RWV scaling takes precedence over per-frame PixelValueTransformation + for frame in frames: + frame.RealWorldValueMappingSequence = [Dataset()] + frame.RealWorldValueMappingSequence[0].RealWorldValueSlope = 10.0 + frame.RealWorldValueMappingSequence[0].RealWorldValueIntercept = -5.0 + frame.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence = [Dataset()] + frame.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[ + 0 + ].CodeMeaning = '%' + assert_array_equal(data * 10 - 5, MFW(fake_mf)._scale_data(data)) + # Test varying per-frame scale factors + for frame_idx, frame in enumerate(frames): + frame.RealWorldValueMappingSequence[0].RealWorldValueSlope = 2 * (frame_idx + 1) + frame.RealWorldValueMappingSequence[0].RealWorldValueIntercept = -1 * (frame_idx + 1) + assert_array_equal( + data * np.array([2, 4, 6, 8]) + np.array([-1, -2, -3, -4]), + MFW(fake_mf)._scale_data(data), + ) + + def test_philips_scale_data(self): + fake_mf = deepcopy(self.MINIMAL_MF) + fake_mf.Manufacturer = 'Philips' + fake_mf.Rows = 2 + fake_mf.Columns = 3 + fake_mf.PerFrameFunctionalGroupsSequence = [Dataset() for _ in range(4)] + MFW = self.WRAPCLASS + data = np.arange(24).reshape((2, 3, 4), order='F') + # Unlike other manufacturers, public scale factors from Philips without defined + # units should not be used. 
In lieu of this the private scale factor should be + # used, which should always be available (modulo deidentification). If we can't + # find any of these scale factors a warning is issued. + with pytest.warns( + UserWarning, + match='Unable to find Philips private scale factor, cross-series comparisons may be invalid', + ): + assert_array_equal(data, MFW(fake_mf)._scale_data(data)) + fake_mf.RescaleSlope = 2.0 + fake_mf.RescaleIntercept = -1.0 + for rescale_type in (None, '', 'US', 'normalized'): + if rescale_type is not None: + fake_mf.RescaleType = rescale_type + with pytest.warns( + UserWarning, + match='Unable to find Philips private scale factor, cross-series comparisons may be invalid', + ): + assert_array_equal(data, MFW(fake_mf)._scale_data(data)) + # Falling back to private scaling doesn't generate error + priv_block = fake_mf.private_block(0x2005, 'Philips MR Imaging DD 001', create=True) + priv_block.add_new(0xE, 'FL', 3.0) + assert_array_equal(data * 3.0, MFW(fake_mf)._scale_data(data)) + # If the units are defined they take precedence over private scaling + fake_mf.RescaleType = 'mrad' + assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) + # A RWV scale factor with defined units takes precdence + shared = Dataset() + fake_mf.SharedFunctionalGroupsSequence = [shared] + rwv_map = Dataset() + rwv_map.RealWorldValueSlope = 10.0 + rwv_map.RealWorldValueIntercept = -5.0 + rwv_map.MeasurementUnitsCodeSequence = [Dataset()] + rwv_map.MeasurementUnitsCodeSequence[0].CodeMeaning = '%' + shared.RealWorldValueMappingSequence = [rwv_map] + assert_array_equal(data * 10 - 5, MFW(fake_mf)._scale_data(data)) + # Get rid of valid top-level scale factors, test per-frame scale factors + delattr(shared, 'RealWorldValueMappingSequence') + delattr(fake_mf, 'RescaleType') + del fake_mf[priv_block.get_tag(0xE)] + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + # Simplest case is all frames have same (valid) scale factor + for frame in fake_mf.PerFrameFunctionalGroupsSequence: + pix_trans = Dataset() + pix_trans.RescaleSlope = 2.5 + pix_trans.RescaleIntercept = -4 + pix_trans.RescaleType = 'mrad' + frame.PixelValueTransformationSequence = [pix_trans] + assert_array_equal(data * 2.5 - 4, MFW(fake_mf)._scale_data(data)) + # If some frames are missing valid scale factors we should get a warning + for frame in fake_mf.PerFrameFunctionalGroupsSequence[2:]: + delattr(frame.PixelValueTransformationSequence[0], 'RescaleType') + with pytest.warns( + UserWarning, + match='Unable to find Philips private scale factor, cross-series comparisons may be invalid', + ): + assert_array_equal( + data * np.array([2.5, 2.5, 1, 1]) + np.array([-4, -4, 0, 0]), + MFW(fake_mf)._scale_data(data), + ) + # We can fall back to private scale factor on frame-by-frame basis + for frame in fake_mf.PerFrameFunctionalGroupsSequence: + priv_block = frame.private_block(0x2005, 'Philips MR Imaging DD 001', create=True) + priv_block.add_new(0xE, 'FL', 7.0) + assert_array_equal( + data * np.array([2.5, 2.5, 7, 7]) + np.array([-4, -4, 0, 0]), + MFW(fake_mf)._scale_data(data), + ) + # Again RWV scale factors take precedence + for frame_idx, frame in enumerate(fake_mf.PerFrameFunctionalGroupsSequence): + rwv_map = Dataset() + rwv_map.RealWorldValueSlope = 14.0 - frame_idx + rwv_map.RealWorldValueIntercept = 5.0 + rwv_map.MeasurementUnitsCodeSequence = [Dataset()] + rwv_map.MeasurementUnitsCodeSequence[0].CodeMeaning = '%' + frame.RealWorldValueMappingSequence = 
[rwv_map] + assert_array_equal( + data * np.array([14, 13, 12, 11]) + np.array([5, 5, 5, 5]), + MFW(fake_mf)._scale_data(data), + ) diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py index 24f4afc2fe..2c01c9d161 100644 --- a/nibabel/nicom/utils.py +++ b/nibabel/nicom/utils.py @@ -1,5 +1,7 @@ """Utilities for working with DICOM datasets""" +from enum import Enum + def find_private_section(dcm_data, group_no, creator): """Return start element in group `group_no` given creator name `creator` @@ -45,3 +47,55 @@ def find_private_section(dcm_data, group_no, creator): if match_func(val): return elno * 0x100 return None + + +class Vendor(Enum): + SIEMENS = 1 + GE = 2 + PHILIPS = 3 + + +vendor_priv_sections = { + Vendor.SIEMENS: [ + (0x9, 'SIEMENS SYNGO INDEX SERVICE'), + (0x19, 'SIEMENS MR HEADER'), + (0x21, 'SIEMENS MR SDR 01'), + (0x21, 'SIEMENS MR SDS 01'), + (0x21, 'SIEMENS MR SDI 02'), + (0x29, 'SIEMENS CSA HEADER'), + (0x29, 'SIEMENS MEDCOM HEADER2'), + (0x51, 'SIEMENS MR HEADER'), + ], + Vendor.PHILIPS: [ + (0x2001, 'Philips Imaging DD 001'), + (0x2001, 'Philips Imaging DD 002'), + (0x2001, 'Philips Imaging DD 129'), + (0x2005, 'Philips MR Imaging DD 001'), + (0x2005, 'Philips MR Imaging DD 002'), + (0x2005, 'Philips MR Imaging DD 003'), + (0x2005, 'Philips MR Imaging DD 004'), + (0x2005, 'Philips MR Imaging DD 005'), + (0x2005, 'Philips MR Imaging DD 006'), + (0x2005, 'Philips MR Imaging DD 007'), + (0x2005, 'Philips MR Imaging DD 005'), + (0x2005, 'Philips MR Imaging DD 006'), + ], + Vendor.GE: [ + (0x9, 'GEMS_IDEN_01'), + (0x19, 'GEMS_ACQU_01'), + (0x21, 'GEMS_RELA_01'), + (0x23, 'GEMS_STDY_01'), + (0x25, 'GEMS_SERS_01'), + (0x27, 'GEMS_IMAG_01'), + (0x29, 'GEMS_IMPS_01'), + (0x43, 'GEMS_PARM_01'), + ], +} + + +def vendor_from_private(dcm_data): + """Try to determine the vendor by looking for specific private tags""" + for vendor, priv_sections in vendor_priv_sections.items(): + for priv_group, priv_creator in priv_sections: + if find_private_section(dcm_data, priv_group, priv_creator) != None: + return vendor From f0264abbb295e063ea8b66be36d56319a30b2ecb Mon Sep 17 00:00:00 2001 From: Brendan Moloney Date: Mon, 12 Aug 2024 17:14:04 -0700 Subject: [PATCH 608/702] TST: Don't assume pydicom installed in test_dicomwrappers --- nibabel/nicom/tests/test_dicomwrappers.py | 84 +++++++++++++---------- 1 file changed, 48 insertions(+), 36 deletions(-) diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 0556fc63cc..55c27df50a 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -11,7 +11,6 @@ import numpy as np import pytest from numpy.testing import assert_array_almost_equal, assert_array_equal -from pydicom.dataset import Dataset from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data from ...volumeutils import endian_codes @@ -64,8 +63,8 @@ def test_wrappers(): # test direct wrapper calls # first with empty or minimal data multi_minimal = { - 'PerFrameFunctionalGroupsSequence': [Dataset()], - 'SharedFunctionalGroupsSequence': [Dataset()], + 'PerFrameFunctionalGroupsSequence': [pydicom.Dataset()], + 'SharedFunctionalGroupsSequence': [pydicom.Dataset()], } for maker, args in ( (didw.Wrapper, ({},)), @@ -164,10 +163,10 @@ def test_wrapper_from_data(): fake_data['SOPClassUID'] = '1.2.840.10008.5.1.4.1.1.4.1' with pytest.raises(didw.WrapperError): didw.wrapper_from_data(fake_data) - fake_data['PerFrameFunctionalGroupsSequence'] = [Dataset()] + 
fake_data['PerFrameFunctionalGroupsSequence'] = [pydicom.Dataset()] with pytest.raises(didw.WrapperError): didw.wrapper_from_data(fake_data) - fake_data['SharedFunctionalGroupsSequence'] = [Dataset()] + fake_data['SharedFunctionalGroupsSequence'] = [pydicom.Dataset()] # minimal set should now be met dw = didw.wrapper_from_data(fake_data) assert dw.is_multiframe @@ -386,14 +385,14 @@ def fake_frames(seq_name, field_name, value_seq, frame_seq=None): value_seq[n] for n in range(N) """ if frame_seq is None: - frame_seq = [Dataset() for _ in range(len(value_seq))] + frame_seq = [pydicom.Dataset() for _ in range(len(value_seq))] for value, fake_frame in zip(value_seq, frame_seq): if value is None: continue if hasattr(fake_frame, seq_name): fake_element = getattr(fake_frame, seq_name)[0] else: - fake_element = Dataset() + fake_element = pydicom.Dataset() setattr(fake_frame, seq_name, [fake_element]) setattr(fake_element, field_name, value) return frame_seq @@ -436,30 +435,30 @@ def __repr__(self): attr_strs.append(f'{attr}={getattr(self, attr)}') return f"{self.__class__.__name__}({', '.join(attr_strs)})" - class DimIdxSeqElem(Dataset): + class DimIdxSeqElem(pydicom.Dataset): def __init__(self, dip=(0, 0), fgp=None): super().__init__() self.DimensionIndexPointer = dip if fgp is not None: self.FunctionalGroupPointer = fgp - class FrmContSeqElem(Dataset): + class FrmContSeqElem(pydicom.Dataset): def __init__(self, div, sid): super().__init__() self.DimensionIndexValues = div self.StackID = sid - class PlnPosSeqElem(Dataset): + class PlnPosSeqElem(pydicom.Dataset): def __init__(self, ipp): super().__init__() self.ImagePositionPatient = ipp - class PlnOrientSeqElem(Dataset): + class PlnOrientSeqElem(pydicom.Dataset): def __init__(self, iop): super().__init__() self.ImageOrientationPatient = iop - class PerFrmFuncGrpSeqElem(Dataset): + class PerFrmFuncGrpSeqElem(pydicom.Dataset): def __init__(self, div, sid, ipp, iop): super().__init__() self.FrameContentSequence = [FrmContSeqElem(div, sid)] @@ -514,17 +513,21 @@ def __init__(self, div, sid, ipp, iop): } -class FakeDataset(Dataset): - pixel_array = None +if have_dicom: + + class FakeDataset(pydicom.Dataset): + pixel_array = None class TestMultiFrameWrapper(TestCase): # Test MultiframeWrapper - # Minimal contents of dcm_data for this wrapper - MINIMAL_MF = FakeDataset() - MINIMAL_MF.PerFrameFunctionalGroupsSequence = [Dataset()] - MINIMAL_MF.SharedFunctionalGroupsSequence = [Dataset()] - WRAPCLASS = didw.MultiframeWrapper + + if have_dicom: + # Minimal contents of dcm_data for this wrapper + MINIMAL_MF = FakeDataset() + MINIMAL_MF.PerFrameFunctionalGroupsSequence = [pydicom.Dataset()] + MINIMAL_MF.SharedFunctionalGroupsSequence = [pydicom.Dataset()] + WRAPCLASS = didw.MultiframeWrapper @dicom_test def test_shape(self): @@ -719,6 +722,7 @@ def test_shape(self): fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) assert MFW(fake_mf).image_shape == (32, 64, 3) + @dicom_test def test_iop(self): # Test Image orient patient for multiframe fake_mf = deepcopy(self.MINIMAL_MF) @@ -732,12 +736,13 @@ def test_iop(self): )[0] fake_mf.SharedFunctionalGroupsSequence = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) - fake_mf.SharedFunctionalGroupsSequence = [Dataset()] + fake_mf.SharedFunctionalGroupsSequence = [pydicom.Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_orient_patient fake_mf.PerFrameFunctionalGroupsSequence = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 
1], [1, 0], [0, 0]]) + @dicom_test def test_voxel_sizes(self): # Test voxel size calculation fake_mf = deepcopy(self.MINIMAL_MF) @@ -761,7 +766,7 @@ def test_voxel_sizes(self): del fake_mf.SpacingBetweenSlices assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Removing shared leads to error again - fake_mf.SharedFunctionalGroupsSequence = [Dataset()] + fake_mf.SharedFunctionalGroupsSequence = [pydicom.Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).voxel_sizes # Restoring to frames makes it work again @@ -777,6 +782,7 @@ def test_voxel_sizes(self): fake_frame.PixelMeasuresSequence[0].SliceThickness = Decimal('5.4') assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) + @dicom_test def test_image_position(self): # Test image_position property for multiframe fake_mf = deepcopy(self.MINIMAL_MF) @@ -792,7 +798,7 @@ def test_image_position(self): ) fake_mf.SharedFunctionalGroupsSequence = frames assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) - fake_mf.SharedFunctionalGroupsSequence = [Dataset()] + fake_mf.SharedFunctionalGroupsSequence = [pydicom.Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_position fake_mf.PerFrameFunctionalGroupsSequence = frames @@ -933,12 +939,13 @@ def test_data_fake(self): fake_mf.pixel_array = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) + @dicom_test def test_scale_data(self): # Test data scaling fake_mf = deepcopy(self.MINIMAL_MF) fake_mf.Rows = 2 fake_mf.Columns = 3 - fake_mf.PerFrameFunctionalGroupsSequence = [Dataset() for _ in range(4)] + fake_mf.PerFrameFunctionalGroupsSequence = [pydicom.Dataset() for _ in range(4)] MFW = self.WRAPCLASS data = np.arange(24).reshape((2, 3, 4), order='F') assert_array_equal(data, MFW(fake_mf)._scale_data(data)) @@ -947,11 +954,11 @@ def test_scale_data(self): fake_mf.RescaleIntercept = -1.0 assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) # RealWorldValueMapping takes precedence, but only with defined units - fake_mf.RealWorldValueMappingSequence = [Dataset()] + fake_mf.RealWorldValueMappingSequence = [pydicom.Dataset()] fake_mf.RealWorldValueMappingSequence[0].RealWorldValueSlope = 10.0 fake_mf.RealWorldValueMappingSequence[0].RealWorldValueIntercept = -5.0 assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) - fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence = [Dataset()] + fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence = [pydicom.Dataset()] fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[0].CodeMeaning = '%' assert_array_equal(data * 10 - 5, MFW(fake_mf)._scale_data(data)) fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[ @@ -959,10 +966,12 @@ def test_scale_data(self): ].CodeMeaning = 'no units' assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) # Possible to have more than one RealWorldValueMapping, use first one with defined units - fake_mf.RealWorldValueMappingSequence.append(Dataset()) + fake_mf.RealWorldValueMappingSequence.append(pydicom.Dataset()) fake_mf.RealWorldValueMappingSequence[-1].RealWorldValueSlope = 15.0 fake_mf.RealWorldValueMappingSequence[-1].RealWorldValueIntercept = -3.0 - fake_mf.RealWorldValueMappingSequence[-1].MeasurementUnitsCodeSequence = [Dataset()] + fake_mf.RealWorldValueMappingSequence[-1].MeasurementUnitsCodeSequence = [ + pydicom.Dataset() + ] fake_mf.RealWorldValueMappingSequence[-1].MeasurementUnitsCodeSequence[0].CodeMeaning = '%' 
assert_array_equal(data * 15 - 3, MFW(fake_mf)._scale_data(data)) # A global RWV scale takes precedence over per-frame PixelValueTransformation @@ -988,10 +997,12 @@ def test_scale_data(self): assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data)) # A per-frame RWV scaling takes precedence over per-frame PixelValueTransformation for frame in frames: - frame.RealWorldValueMappingSequence = [Dataset()] + frame.RealWorldValueMappingSequence = [pydicom.Dataset()] frame.RealWorldValueMappingSequence[0].RealWorldValueSlope = 10.0 frame.RealWorldValueMappingSequence[0].RealWorldValueIntercept = -5.0 - frame.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence = [Dataset()] + frame.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence = [ + pydicom.Dataset() + ] frame.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[ 0 ].CodeMeaning = '%' @@ -1005,12 +1016,13 @@ def test_scale_data(self): MFW(fake_mf)._scale_data(data), ) + @dicom_test def test_philips_scale_data(self): fake_mf = deepcopy(self.MINIMAL_MF) fake_mf.Manufacturer = 'Philips' fake_mf.Rows = 2 fake_mf.Columns = 3 - fake_mf.PerFrameFunctionalGroupsSequence = [Dataset() for _ in range(4)] + fake_mf.PerFrameFunctionalGroupsSequence = [pydicom.Dataset() for _ in range(4)] MFW = self.WRAPCLASS data = np.arange(24).reshape((2, 3, 4), order='F') # Unlike other manufacturers, public scale factors from Philips without defined @@ -1040,12 +1052,12 @@ def test_philips_scale_data(self): fake_mf.RescaleType = 'mrad' assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) # A RWV scale factor with defined units takes precdence - shared = Dataset() + shared = pydicom.Dataset() fake_mf.SharedFunctionalGroupsSequence = [shared] - rwv_map = Dataset() + rwv_map = pydicom.Dataset() rwv_map.RealWorldValueSlope = 10.0 rwv_map.RealWorldValueIntercept = -5.0 - rwv_map.MeasurementUnitsCodeSequence = [Dataset()] + rwv_map.MeasurementUnitsCodeSequence = [pydicom.Dataset()] rwv_map.MeasurementUnitsCodeSequence[0].CodeMeaning = '%' shared.RealWorldValueMappingSequence = [rwv_map] assert_array_equal(data * 10 - 5, MFW(fake_mf)._scale_data(data)) @@ -1057,7 +1069,7 @@ def test_philips_scale_data(self): fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) # Simplest case is all frames have same (valid) scale factor for frame in fake_mf.PerFrameFunctionalGroupsSequence: - pix_trans = Dataset() + pix_trans = pydicom.Dataset() pix_trans.RescaleSlope = 2.5 pix_trans.RescaleIntercept = -4 pix_trans.RescaleType = 'mrad' @@ -1084,10 +1096,10 @@ def test_philips_scale_data(self): ) # Again RWV scale factors take precedence for frame_idx, frame in enumerate(fake_mf.PerFrameFunctionalGroupsSequence): - rwv_map = Dataset() + rwv_map = pydicom.Dataset() rwv_map.RealWorldValueSlope = 14.0 - frame_idx rwv_map.RealWorldValueIntercept = 5.0 - rwv_map.MeasurementUnitsCodeSequence = [Dataset()] + rwv_map.MeasurementUnitsCodeSequence = [pydicom.Dataset()] rwv_map.MeasurementUnitsCodeSequence[0].CodeMeaning = '%' frame.RealWorldValueMappingSequence = [rwv_map] assert_array_equal( From 5203368461dbd720be6e776d52803a5ac81fe434 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 5 Sep 2024 14:07:31 -0400 Subject: [PATCH 609/702] fix: Update order of indices on mouseclick --- nibabel/viewers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index e66a34149a..0dc2f0dafc 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -492,10 +492,11 @@ def _on_mouse(self, 
event): x, y = event.xdata, event.ydata x = self._sizes[xax] - x if self._flips[xax] else x y = self._sizes[yax] - y if self._flips[yax] else y - idxs = [None, None, None, 1.0] + idxs = np.ones(4) idxs[xax] = x idxs[yax] = y idxs[ii] = self._data_idx[ii] + idxs[:3] = idxs[self._order] self._set_position(*np.dot(self._affine, idxs)[:3]) self._draw() From 4f36bc7a5591a4ac5ac416a9586a4ad8ec53148c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 5 Sep 2024 14:20:26 -0400 Subject: [PATCH 610/702] test: Add regression test for rotated data --- nibabel/tests/test_viewers.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py index 53f4a32bdc..72d839c923 100644 --- a/nibabel/tests/test_viewers.py +++ b/nibabel/tests/test_viewers.py @@ -102,3 +102,35 @@ def test_viewer(): v2.link_to(v1) # shouldn't do anything v1.close() v2.close() + + +@needs_mpl +def test_viewer_nonRAS(): + data1 = np.random.rand(10, 20, 40) + data1[5, 10, :] = 0 + data1[5, :, 30] = 0 + data1[:, 10, 30] = 0 + # RSA affine + aff1 = np.array([[1, 0, 0, -5], [0, 0, 1, -30], [0, 1, 0, -10], [0, 0, 0, 1]]) + o1 = OrthoSlicer3D(data1, aff1) + sag = o1._ims[0].get_array() + cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + + # Sagittal view: [0, I->S, P->A], so data is transposed, matching plot array + assert_array_equal(sag, data1[5, :, :]) + # Coronal view: [L->R, I->S, 0]. Data is not transposed, transpose to match plot array + assert_array_equal(cor, data1[:, :, 30].T) + # Axial view: [L->R, 0, P->A]. Data is not transposed, transpose to match plot array + assert_array_equal(axi, data1[:, 10, :].T) + + o1.set_position(1, 2, 3) # R, A, S coordinates + + sag = o1._ims[0].get_array() + cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + + # Shift 1 right, 2 anterior, 3 superior + assert_array_equal(sag, data1[6, :, :]) + assert_array_equal(cor, data1[:, :, 32].T) + assert_array_equal(axi, data1[:, 13, :].T) From 032f6df03de1c3a39b22ebe88694b981ae0b000d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Sep 2024 12:53:06 -0400 Subject: [PATCH 611/702] Revert "ENH: Add writer for Siemens CSA header" --- nibabel/nicom/csareader.py | 110 -------------------------- nibabel/nicom/tests/test_csareader.py | 11 --- 2 files changed, 121 deletions(-) diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index dd081b22c2..df379e0be8 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -1,7 +1,6 @@ """CSA header reader from SPM spec""" import numpy as np -import struct from .structreader import Unpacker from .utils import find_private_section @@ -29,10 +28,6 @@ class CSAReadError(CSAError): pass -class CSAWriteError(CSAError): - pass - - def get_csa_header(dcm_data, csa_type='image'): """Get CSA header information from DICOM header @@ -166,96 +161,6 @@ def read(csa_str): return csa_dict -def write(csa_header): - ''' Write string from CSA header `csa_header` - - Parameters - ---------- - csa_header : dict - header information as dict, where `header` has fields (at least) - ``type, n_tags, tags``. ``header['tags']`` is also a dictionary - with one key, value pair for each tag in the header. 
- - Returns - ------- - csa_str : str - byte string containing CSA header information - ''' - result = [] - if csa_header['type'] == 2: - result.append(b'SV10') - result.append(csa_header['unused0']) - if not 0 < csa_header['n_tags'] <= 128: - raise CSAWriteError('Number of tags `t` should be ' - '0 < t <= 128') - result.append(struct.pack('2I', - csa_header['n_tags'], - csa_header['check']) - ) - - # Build list of tags in correct order - tags = list(csa_header['tags'].items()) - tags.sort(key=lambda x: x[1]['tag_no']) - tag0_n_items = tags[0][1]['n_items'] - - # Add the information for each tag - for tag_name, tag_dict in tags: - vm = tag_dict['vm'] - vr = tag_dict['vr'] - n_items = tag_dict['n_items'] - assert n_items < 100 - result.append(struct.pack('64si4s3i', - make_nt_str(tag_name), - vm, - make_nt_str(vr), - tag_dict['syngodt'], - n_items, - tag_dict['last3']) - ) - - # Figure out the number of values for this tag - if vm == 0: - n_values = n_items - else: - n_values = vm - - # Add each item for this tag - for item_no in range(n_items): - # Figure out the item length - if item_no >= n_values or tag_dict['items'][item_no] == '': - item_len = 0 - else: - item = tag_dict['items'][item_no] - if not isinstance(item, str): - item = str(item) - item_nt_str = make_nt_str(item) - item_len = len(item_nt_str) - - # These values aren't actually preserved in the dict - # representation of the header. Best we can do is set the ones - # that determine the item length appropriately. - x0, x1, x2, x3 = 0, 0, 0, 0 - if csa_header['type'] == 1: # CSA1 - odd length calculation - x0 = tag0_n_items + item_len - if item_len < 0 or (ptr + item_len) > csa_len: - if item_no < vm: - items.append('') - break - else: # CSA2 - x1 = item_len - result.append(struct.pack('4i', x0, x1, x2, x3)) - - if item_len == 0: - continue - - result.append(item_nt_str) - # go to 4 byte boundary - plus4 = item_len % 4 - if plus4 != 0: - result.append(b'\x00' * (4 - plus4)) - return b''.join(result) - - def get_scalar(csa_dict, tag_name): try: items = csa_dict['tags'][tag_name]['items'] @@ -353,18 +258,3 @@ def nt_str(s): if zero_pos == -1: return s return s[:zero_pos].decode('latin-1') - - -def make_nt_str(s): - ''' Create a null terminated byte string from a unicode object. - - Parameters - ---------- - s : unicode - - Returns - ------- - result : bytes - s encoded as latin-1 with a null char appended - ''' - return s.encode('latin-1') + b'\x00' diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 67ae44ecbf..f31f4a3935 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -130,14 +130,3 @@ def test_missing_csa_elem(): del dcm[csa_tag] hdr = csa.get_csa_header(dcm, 'image') assert hdr is None - - -def test_read_write_rt(): - # Try doing a read-write-read round trip and make sure the dictionary - # representation of the header is the same. We can't exactly reproduce the - # original string representation currently. 
- for csa_str in (CSA2_B0, CSA2_B1000): - csa_info = csa.read(csa_str) - new_csa_str = csa.write(csa_info) - new_csa_info = csa.read(new_csa_str) - assert csa_info == new_csa_info From a70ab5417143806330f00b59fd9e28537b6ebe3e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 5 Sep 2024 14:30:03 -0400 Subject: [PATCH 612/702] TYP: Ignore overzealous warning for min/max with numpy scalars --- nibabel/volumeutils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 29b954dbb3..c2387f0949 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -624,7 +624,7 @@ def array_to_file( # pre scale thresholds mn, mx = _dt_min_max(in_dtype, mn, mx) mn_out, mx_out = _dt_min_max(out_dtype) - pre_clips = max(mn, mn_out), min(mx, mx_out) + pre_clips = max(mn, mn_out), min(mx, mx_out) # type: ignore[type-var] return _write_data(data, fileobj, out_dtype, order, pre_clips=pre_clips) # In any case, we do not want to check for nans because we've already # disallowed scaling that generates nans From a1fff406a18313ff67f9ed6abd9fce58dbb65e59 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 5 Sep 2024 14:32:04 -0400 Subject: [PATCH 613/702] Update pre-commit config --- .pre-commit-config.yaml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b348393a45..4f49318eb0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ exclude: ".*/data/.*" repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.6.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer @@ -13,15 +13,18 @@ repos: - id: check-merge-conflict - id: check-vcs-permalinks - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.4 + rev: v0.6.4 hooks: - id: ruff - args: [--fix, --show-fixes, --exit-non-zero-on-fix] + args: [ --fix ] exclude: = ["doc", "tools"] - id: ruff-format exclude: = ["doc", "tools"] + - id: ruff + args: [ --select, ISC001, --fix ] + exclude: = ["doc", "tools"] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.5.1 + rev: v1.11.2 hooks: - id: mypy # Sync with project.optional-dependencies.typing @@ -36,7 +39,7 @@ repos: args: ["nibabel"] pass_filenames: false - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 + rev: v2.3.0 hooks: - id: codespell additional_dependencies: From e2fe1903f73c4c58865af34fd2ab8781c58ab7e8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Sep 2024 12:16:47 -0400 Subject: [PATCH 614/702] typ: Ignore Pointset.__rmatmul__/ndarray.__matmul__ inconsistency --- nibabel/pointset.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nibabel/pointset.py b/nibabel/pointset.py index 70a802480d..889a8c70cd 100644 --- a/nibabel/pointset.py +++ b/nibabel/pointset.py @@ -101,7 +101,11 @@ def dim(self) -> int: """The dimensionality of the space the coordinates are in""" return self.coordinates.shape[1] - self.homogeneous - def __rmatmul__(self, affine: np.ndarray) -> Self: + # Use __rmatmul__ to prefer to compose affines. Mypy does not like that + # this conflicts with ndarray.__matmul__. We will need some more feedback + # on how this plays out for type-checking or code suggestions before we + # can do better than ignore. + def __rmatmul__(self, affine: np.ndarray) -> Self: # type: ignore[misc] """Apply an affine transformation to the pointset This will return a new pointset with an updated affine matrix only. 
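The `affine @ pointset` composition that the `# type: ignore[misc]` above protects is ordinary homogeneous-coordinate arithmetic. A minimal plain-NumPy sketch of what the composed affine ultimately does to coordinates, independent of the Pointset class and with illustrative values only:

    import numpy as np

    # Three 3D points, one per row
    coords = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
    # 4x4 affine translating by (10, 20, 30)
    affine = np.eye(4)
    affine[:3, 3] = [10, 20, 30]
    # Promote to homogeneous row vectors, apply the affine, drop the final column
    homogeneous = np.hstack([coords, np.ones((3, 1))])
    moved = (affine @ homogeneous.T).T[:, :3]
    assert np.allclose(moved, coords + [10.0, 20.0, 30.0])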
From 7a502a3d052cc68ac3c4ae22b89447ff9c53d013 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 5 Jul 2024 17:16:05 -0400
Subject: [PATCH 615/702] MNT: Require typing_extensions for Python <3.13

---
 pyproject.toml | 1 +
 tox.ini        | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index ff5168f9c6..34d9f7bb50 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,6 +14,7 @@ dependencies = [
     "numpy >=1.20",
     "packaging >=17",
     "importlib_resources >=5.12; python_version < '3.12'",
+    "typing_extensions >=4.6; python_version < '3.13'",
 ]
 classifiers = [
     "Development Status :: 5 - Production/Stable",
diff --git a/tox.ini b/tox.ini
index 5df35c8d38..bd99d986c2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -77,7 +77,8 @@ extras = test
 deps =
   # General minimum dependencies: pin based on API usage
   min: packaging ==17
-  min: importlib_resources ==1.3; python_version < '3.9'
+  min: importlib_resources ==5.12; python_version < '3.12'
+  min: typing_extensions ==4.6; python_version < '3.13'
   # NEP29/SPEC0 + 1yr: Test on minor release series within the last 3 years
   # We're extending this to all optional dependencies
   # This only affects the range that we test on; numpy is the only non-optional
From bb8b808622dad737acbe0e881423ad22a4849e38 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 5 Jul 2024 15:51:04 -0400
Subject: [PATCH 616/702] RF: Add generic NiftiExtension base class

Nifti1Extension is a non-ideal base class for NIfTI extensions because it
assumes that it is safe to use a null transformation, and thus to default to
`bytes` objects. This makes it difficult to define its typing behavior in a
way that allows subclasses to refine the type such that type-checkers
understand it.

This patch creates a generic `NiftiExtension` class that parameterizes the
"runtime representation" type. Nifti1Extension subclasses it with another
parameter that defaults to `bytes`, allowing it to be subclassed in turn
(preserving the Nifti1Extension -> Nifti1DicomExtension subclass
relationship) while still emitting `bytes`.

We could have simply made `Nifti1Extension` the base class, but the
mangle/unmangle methods need some casts or ignore comments to type-check
cleanly. This separation allows us to have a clean base class with the
legacy hacks cordoned off into a subclass.

---
 nibabel/nifti1.py            | 264 +++++++++++++++++++++--------------
 nibabel/tests/test_nifti1.py |   6 +-
 2 files changed, 166 insertions(+), 104 deletions(-)

diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index ecd94c10de..791bf3b1e5 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -13,11 +13,13 @@

 from __future__ import annotations

+import typing as ty
 import warnings
 from io import BytesIO

 import numpy as np
 import numpy.linalg as npl
+from typing_extensions import TypeVar  # PY312

 from . 
import analyze # module import from .arrayproxy import get_obj_dtype @@ -31,7 +33,19 @@ from .spm99analyze import SpmAnalyzeHeader from .volumeutils import Recoder, endian_codes, make_dt_codes -pdcm, have_dicom, _ = optional_package('pydicom') +if ty.TYPE_CHECKING: + import pydicom as pdcm + + have_dicom = True + DicomDataset = pdcm.Dataset +else: + pdcm, have_dicom, _ = optional_package('pydicom') + if have_dicom: + DicomDataset = pdcm.Dataset + else: + DicomDataset = ty.Any + +T = TypeVar('T', default=bytes) # nifti1 flat header definition for Analyze-like first 348 bytes # first number in comments indicates offset in file header in bytes @@ -283,15 +297,19 @@ ) -class Nifti1Extension: - """Baseclass for NIfTI1 header extensions. +class NiftiExtension(ty.Generic[T]): + """Base class for NIfTI header extensions.""" - This class is sufficient to handle very simple text-based extensions, such - as `comment`. More sophisticated extensions should/will be supported by - dedicated subclasses. - """ + code: int + encoding: ty.Optional[str] = None + _content: bytes + _object: ty.Optional[T] = None - def __init__(self, code, content): + def __init__( + self, + code: ty.Union[int, str], + content: bytes, + ) -> None: """ Parameters ---------- @@ -299,94 +317,83 @@ def __init__(self, code, content): Canonical extension code as defined in the NIfTI standard, given either as integer or corresponding label (see :data:`~nibabel.nifti1.extension_codes`) - content : str - Extension content as read from the NIfTI file header. This content is - converted into a runtime representation. + content : bytes + Extension content as read from the NIfTI file header. This content may + be converted into a runtime representation. """ try: - self._code = extension_codes.code[code] + self.code = extension_codes.code[code] # type: ignore[assignment] except KeyError: - # XXX or fail or at least complain? - self._code = code - self._content = self._unmangle(content) + self.code = code # type: ignore[assignment] + self._content = content - def _unmangle(self, value): - """Convert the extension content into its runtime representation. + # Handle (de)serialization of extension content + # Subclasses may implement these methods to provide an alternative + # view of the extension content. If left unimplemented, the content + # must be bytes and is not modified. + def _mangle(self, obj: T) -> bytes: + raise NotImplementedError - The default implementation does nothing at all. + def _unmangle(self, content: bytes) -> T: + raise NotImplementedError - Parameters - ---------- - value : str - Extension content as read from file. + def _sync(self) -> None: + """Synchronize content with object. - Returns - ------- - The same object that was passed as `value`. - - Notes - ----- - Subclasses should reimplement this method to provide the desired - unmangling procedure and may return any type of object. + This permits the runtime representation to be modified in-place + and updates the bytes representation accordingly. """ - return value - - def _mangle(self, value): - """Convert the extension content into NIfTI file header representation. + if self._object is not None: + self._content = self._mangle(self._object) - The default implementation does nothing at all. - - Parameters - ---------- - value : str - Extension content in runtime form. 
+ def __repr__(self) -> str: + try: + code = extension_codes.label[self.code] + except KeyError: + # deal with unknown codes + code = self.code + return f'{self.__class__.__name__}({code}, {self._content!r})' - Returns - ------- - str + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, self.__class__) + and self.code == other.code + and self.content == other.content + ) - Notes - ----- - Subclasses should reimplement this method to provide the desired - mangling procedure. - """ - return value + def __ne__(self, other): + return not self == other def get_code(self): """Return the canonical extension type code.""" - return self._code + return self.code - def get_content(self): - """Return the extension content in its runtime representation.""" + @property + def content(self) -> bytes: + """Return the extension content as raw bytes.""" + self._sync() return self._content - def get_sizeondisk(self): + def get_content(self) -> T: + """Return the extension content in its runtime representation. + + This method may return a different type for each extension type. + """ + if self._object is None: + self._object = self._unmangle(self._content) + return self._object + + def get_sizeondisk(self) -> int: """Return the size of the extension in the NIfTI file.""" + self._sync() # need raw value size plus 8 bytes for esize and ecode - size = len(self._mangle(self._content)) - size += 8 + size = len(self._content) + 8 # extensions size has to be a multiple of 16 bytes if size % 16 != 0: size += 16 - (size % 16) return size - def __repr__(self): - try: - code = extension_codes.label[self._code] - except KeyError: - # deal with unknown codes - code = self._code - - s = f"Nifti1Extension('{code}', '{self._content}')" - return s - - def __eq__(self, other): - return (self._code, self._content) == (other._code, other._content) - - def __ne__(self, other): - return not self == other - - def write_to(self, fileobj, byteswap): + def write_to(self, fileobj: ty.BinaryIO, byteswap: bool = False) -> None: """Write header extensions to fileobj Write starts at fileobj current file position. @@ -402,22 +409,74 @@ def write_to(self, fileobj, byteswap): ------- None """ + self._sync() extstart = fileobj.tell() rawsize = self.get_sizeondisk() # write esize and ecode first - extinfo = np.array((rawsize, self._code), dtype=np.int32) + extinfo = np.array((rawsize, self.code), dtype=np.int32) if byteswap: extinfo = extinfo.byteswap() fileobj.write(extinfo.tobytes()) # followed by the actual extension content # XXX if mangling upon load is implemented, it should be reverted here - fileobj.write(self._mangle(self._content)) + fileobj.write(self._content) # be nice and zero out remaining part of the extension till the # next 16 byte border fileobj.write(b'\x00' * (extstart + rawsize - fileobj.tell())) -class Nifti1DicomExtension(Nifti1Extension): +class Nifti1Extension(NiftiExtension[T]): + """Baseclass for NIfTI1 header extensions. + + This class is sufficient to handle very simple text-based extensions, such + as `comment`. More sophisticated extensions should/will be supported by + dedicated subclasses. + """ + + def _unmangle(self, value: bytes) -> T: + """Convert the extension content into its runtime representation. + + The default implementation does nothing at all. + + Parameters + ---------- + value : str + Extension content as read from file. + + Returns + ------- + The same object that was passed as `value`. 
+ + Notes + ----- + Subclasses should reimplement this method to provide the desired + unmangling procedure and may return any type of object. + """ + return value # type: ignore[return-value] + + def _mangle(self, value: T) -> bytes: + """Convert the extension content into NIfTI file header representation. + + The default implementation does nothing at all. + + Parameters + ---------- + value : str + Extension content in runtime form. + + Returns + ------- + str + + Notes + ----- + Subclasses should reimplement this method to provide the desired + mangling procedure. + """ + return value # type: ignore[return-value] + + +class Nifti1DicomExtension(Nifti1Extension[DicomDataset]): """NIfTI1 DICOM header extension This class is a thin wrapper around pydicom to read a binary DICOM @@ -427,7 +486,12 @@ class Nifti1DicomExtension(Nifti1Extension): header. """ - def __init__(self, code, content, parent_hdr=None): + def __init__( + self, + code: ty.Union[int, str], + content: ty.Union[bytes, DicomDataset, None] = None, + parent_hdr: ty.Optional[Nifti1Header] = None, + ) -> None: """ Parameters ---------- @@ -452,30 +516,30 @@ def __init__(self, code, content, parent_hdr=None): code should always be 2 for DICOM. """ - self._code = code - if parent_hdr: - self._is_little_endian = parent_hdr.endianness == '<' - else: - self._is_little_endian = True + self._is_little_endian = parent_hdr is None or parent_hdr.endianness == '<' + + bytes_content: bytes if isinstance(content, pdcm.dataset.Dataset): self._is_implicit_VR = False - self._raw_content = self._mangle(content) - self._content = content + self._object = content + bytes_content = self._mangle(content) elif isinstance(content, bytes): # Got a byte string - unmangle it - self._raw_content = content - self._is_implicit_VR = self._guess_implicit_VR() - ds = self._unmangle(content, self._is_implicit_VR, self._is_little_endian) - self._content = ds + self._is_implicit_VR = self._guess_implicit_VR(content) + self._object = self._unmangle(content) + bytes_content = content elif content is None: # initialize a new dicom dataset self._is_implicit_VR = False - self._content = pdcm.dataset.Dataset() + self._object = pdcm.dataset.Dataset() + bytes_content = self._mangle(self._object) else: raise TypeError( f'content must be either a bytestring or a pydicom Dataset. ' f'Got {content.__class__}' ) + super().__init__(code, bytes_content) - def _guess_implicit_VR(self): + @staticmethod + def _guess_implicit_VR(content) -> bool: """Try to guess DICOM syntax by checking for valid VRs. 
Without a DICOM Transfer Syntax, it's difficult to tell if Value @@ -483,19 +547,17 @@ def _guess_implicit_VR(self): This reads where the first VR would be and checks it against a list of valid VRs """ - potential_vr = self._raw_content[4:6].decode() - if potential_vr in pdcm.values.converters.keys(): - implicit_VR = False - else: - implicit_VR = True - return implicit_VR - - def _unmangle(self, value, is_implicit_VR=False, is_little_endian=True): - bio = BytesIO(value) - ds = pdcm.filereader.read_dataset(bio, is_implicit_VR, is_little_endian) - return ds + potential_vr = content[4:6].decode() + return potential_vr not in pdcm.values.converters.keys() + + def _unmangle(self, obj: bytes) -> DicomDataset: + return pdcm.filereader.read_dataset( + BytesIO(obj), + self._is_implicit_VR, + self._is_little_endian, + ) - def _mangle(self, dataset): + def _mangle(self, dataset: DicomDataset) -> bytes: bio = BytesIO() dio = pdcm.filebase.DicomFileLike(bio) dio.is_implicit_VR = self._is_implicit_VR diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 5ee4fb3c15..d1fa4afd0f 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -1339,7 +1339,7 @@ def test_nifti_dicom_extension(): dcmbytes_explicit = struct.pack('') # Big Endian Nifti1Header dcmext = Nifti1DicomExtension(2, dcmbytes_explicit_be, parent_hdr=hdr_be) assert dcmext.__class__ == Nifti1DicomExtension - assert dcmext._guess_implicit_VR() is False + assert dcmext._is_implicit_VR is False assert dcmext.get_code() == 2 assert dcmext.get_content().PatientID == 'NiPy' assert dcmext.get_content()[0x10, 0x20].value == 'NiPy' From 2e2a0e648d445247c6e35ed76fd5299c5a87c508 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 5 Jul 2024 16:19:39 -0400 Subject: [PATCH 617/702] ENH: Add .text and .json() accessors for ease --- nibabel/nifti1.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 791bf3b1e5..bab8031fea 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -13,6 +13,7 @@ from __future__ import annotations +import json import typing as ty import warnings from io import BytesIO @@ -368,16 +369,38 @@ def get_code(self): """Return the canonical extension type code.""" return self.code + # Canonical access to extension content + # Follows the lead of httpx.Response .content, .text and .json() + # properties/methods @property def content(self) -> bytes: """Return the extension content as raw bytes.""" self._sync() return self._content + @property + def text(self) -> str: + """Attempt to decode the extension content as text. + + The encoding is determined by the `encoding` attribute, which may be + set by the user or subclass. If not set, the default encoding is 'utf-8'. + """ + return self.content.decode(self.encoding or 'utf-8') + + def json(self) -> ty.Any: + """Attempt to decode the extension content as JSON. + + If the content is not valid JSON, a JSONDecodeError or UnicodeDecodeError + will be raised. + """ + return json.loads(self.content) + def get_content(self) -> T: """Return the extension content in its runtime representation. This method may return a different type for each extension type. + For simple use cases, consider using ``.content``, ``.text`` or ``.json()`` + instead. 
""" if self._object is None: self._object = self._unmangle(self._content) From e54fab9f77961c3a517ccbaa151e24dfd16d1bec Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 6 Jul 2024 13:07:18 -0400 Subject: [PATCH 618/702] TEST: Test content, text and json() access --- nibabel/tests/test_nifti1.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index d1fa4afd0f..23e71c8324 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -1224,6 +1224,32 @@ def test_ext_eq(): assert not ext == ext2 +def test_extension_content_access(): + ext = Nifti1Extension('comment', b'123') + # Unmangled content access + assert ext.get_content() == b'123' + + # Raw, text and JSON access + assert ext.content == b'123' + assert ext.text == '123' + assert ext.json() == 123 + + # Encoding can be set + ext.encoding = 'ascii' + assert ext.text == '123' + + # Test that encoding errors are caught + ascii_ext = Nifti1Extension('comment', 'hôpital'.encode('utf-8')) + ascii_ext.encoding = 'ascii' + with pytest.raises(UnicodeDecodeError): + ascii_ext.text + + json_ext = Nifti1Extension('unknown', b'{"a": 1}') + assert json_ext.content == b'{"a": 1}' + assert json_ext.text == '{"a": 1}' + assert json_ext.json() == {'a': 1} + + def test_extension_codes(): for k in extension_codes.keys(): Nifti1Extension(k, 'somevalue') From ef60adc24274f658820c8d69fdf58afa4282f7eb Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 7 Jul 2024 08:08:52 -0400 Subject: [PATCH 619/702] ENH: Add from_bytes method for subclasses with known codes --- nibabel/nifti1.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index bab8031fea..0fc92f3aaf 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -20,7 +20,7 @@ import numpy as np import numpy.linalg as npl -from typing_extensions import TypeVar # PY312 +from typing_extensions import Self, TypeVar # PY312 from . import analyze # module import from .arrayproxy import get_obj_dtype @@ -328,6 +328,12 @@ def __init__( self.code = code # type: ignore[assignment] self._content = content + @classmethod + def from_bytes(cls, content: bytes) -> Self: + if not hasattr(cls, 'code'): + raise NotImplementedError('from_bytes() requires a class attribute `code`') + return cls(cls.code, content) + # Handle (de)serialization of extension content # Subclasses may implement these methods to provide an alternative # view of the extension content. If left unimplemented, the content @@ -509,6 +515,8 @@ class Nifti1DicomExtension(Nifti1Extension[DicomDataset]): header. 
""" + code = 2 + def __init__( self, code: ty.Union[int, str], From 8b0e69959b9b87f3f833a62a738faa6b66dda278 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 7 Jul 2024 08:09:32 -0400 Subject: [PATCH 620/702] TYP: Annotate Cifti2Extension --- nibabel/cifti2/cifti2.py | 2 +- nibabel/cifti2/parse_cifti2.py | 12 ++++-------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index cb2e0cfaf4..b2b67978b7 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -1570,7 +1570,7 @@ def to_file_map(self, file_map=None, dtype=None): self.update_headers() header = self._nifti_header - extension = Cifti2Extension(content=self.header.to_xml()) + extension = Cifti2Extension.from_bytes(self.header.to_xml()) header.extensions = Nifti1Extensions( ext for ext in header.extensions if not isinstance(ext, Cifti2Extension) ) diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index 48c2e06537..764e3ae203 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -40,19 +40,15 @@ ) -class Cifti2Extension(Nifti1Extension): +class Cifti2Extension(Nifti1Extension[Cifti2Header]): code = 32 - def __init__(self, code=None, content=None): - Nifti1Extension.__init__(self, code=code or self.code, content=content) - - def _unmangle(self, value): + def _unmangle(self, value: bytes) -> Cifti2Header: parser = Cifti2Parser() parser.parse(string=value) - self._content = parser.header - return self._content + return parser.header - def _mangle(self, value): + def _mangle(self, value: Cifti2Header) -> bytes: if not isinstance(value, Cifti2Header): raise ValueError('Can only mangle a Cifti2Header.') return value.to_xml() From 7237eba757039d5b8cbf9278ff2e33e4488f353b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Sep 2024 12:04:22 -0400 Subject: [PATCH 621/702] rf: Allow extensions to be constructed from objects without serialization --- nibabel/nifti1.py | 77 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 19 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 0fc92f3aaf..d93e4615cc 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -299,7 +299,25 @@ class NiftiExtension(ty.Generic[T]): - """Base class for NIfTI header extensions.""" + """Base class for NIfTI header extensions. + + This class provides access to the extension content in various forms. + For simple extensions that expose data as bytes, text or JSON, this class + is sufficient. More complex extensions should be implemented as subclasses + that provide custom serialization/deserialization methods. + + Efficiency note: + + This class assumes that the runtime representation of the extension content + is mutable. Once a runtime representation is set, it is cached and will be + serialized on any attempt to access the extension content as bytes, including + determining the size of the extension in the NIfTI file. + + If the runtime representation is never accessed, the raw bytes will be used + without modification. While avoiding unnecessary deserialization, if there + are bytestrings that do not produce a valid runtime representation, they will + be written as-is, and may cause errors downstream. 
+ """ code: int encoding: ty.Optional[str] = None @@ -309,7 +327,8 @@ class NiftiExtension(ty.Generic[T]): def __init__( self, code: ty.Union[int, str], - content: bytes, + content: bytes = b'', + object: ty.Optional[T] = None, ) -> None: """ Parameters @@ -318,21 +337,40 @@ def __init__( Canonical extension code as defined in the NIfTI standard, given either as integer or corresponding label (see :data:`~nibabel.nifti1.extension_codes`) - content : bytes - Extension content as read from the NIfTI file header. This content may - be converted into a runtime representation. + content : bytes, optional + Extension content as read from the NIfTI file header. + object : optional + Extension content in runtime form. """ try: self.code = extension_codes.code[code] # type: ignore[assignment] except KeyError: self.code = code # type: ignore[assignment] self._content = content + if object is not None: + self._object = object @classmethod def from_bytes(cls, content: bytes) -> Self: + """Create an extension from raw bytes. + + This constructor may only be used in extension classes with a class + attribute `code` to indicate the extension type. + """ if not hasattr(cls, 'code'): raise NotImplementedError('from_bytes() requires a class attribute `code`') - return cls(cls.code, content) + return cls(cls.code, content=content) + + @classmethod + def from_object(cls, obj: T) -> Self: + """Create an extension from a runtime object. + + This constructor may only be used in extension classes with a class + attribute `code` to indicate the extension type. + """ + if not hasattr(cls, 'code'): + raise NotImplementedError('from_object() requires a class attribute `code`') + return cls(cls.code, object=obj) # Handle (de)serialization of extension content # Subclasses may implement these methods to provide an alternative @@ -401,7 +439,7 @@ def json(self) -> ty.Any: """ return json.loads(self.content) - def get_content(self) -> T: + def get_object(self) -> T: """Return the extension content in its runtime representation. This method may return a different type for each extension type. 
@@ -412,15 +450,14 @@ def get_content(self) -> T: self._object = self._unmangle(self._content) return self._object + # Backwards compatibility + get_content = get_object + def get_sizeondisk(self) -> int: """Return the size of the extension in the NIfTI file.""" - self._sync() - # need raw value size plus 8 bytes for esize and ecode - size = len(self._content) + 8 - # extensions size has to be a multiple of 16 bytes - if size % 16 != 0: - size += 16 - (size % 16) - return size + # need raw value size plus 8 bytes for esize and ecode, rounded up to next 16 bytes + # Rounding C+8 up to M is done by (C+8 + (M-1)) // M * M + return (len(self.content) + 23) // 16 * 16 def write_to(self, fileobj: ty.BinaryIO, byteswap: bool = False) -> None: """Write header extensions to fileobj @@ -438,20 +475,20 @@ def write_to(self, fileobj: ty.BinaryIO, byteswap: bool = False) -> None: ------- None """ - self._sync() extstart = fileobj.tell() - rawsize = self.get_sizeondisk() + rawsize = self.get_sizeondisk() # Calls _sync() # write esize and ecode first extinfo = np.array((rawsize, self.code), dtype=np.int32) if byteswap: extinfo = extinfo.byteswap() fileobj.write(extinfo.tobytes()) - # followed by the actual extension content - # XXX if mangling upon load is implemented, it should be reverted here + # followed by the actual extension content, synced above fileobj.write(self._content) # be nice and zero out remaining part of the extension till the # next 16 byte border - fileobj.write(b'\x00' * (extstart + rawsize - fileobj.tell())) + pad = extstart + rawsize - fileobj.tell() + if pad: + fileobj.write(bytes(pad)) class Nifti1Extension(NiftiExtension[T]): @@ -462,6 +499,8 @@ class Nifti1Extension(NiftiExtension[T]): dedicated subclasses. """ + code = 0 # Default to unknown extension + def _unmangle(self, value: bytes) -> T: """Convert the extension content into its runtime representation. From a0231b1c5476550506fde114a9df305a5f4b8913 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Sep 2024 12:08:59 -0400 Subject: [PATCH 622/702] rf: Construct DicomExtensions more simply --- nibabel/nifti1.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index d93e4615cc..da890a63ac 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -555,6 +555,8 @@ class Nifti1DicomExtension(Nifti1Extension[DicomDataset]): """ code = 2 + _is_implict_VR: bool = False + _is_little_endian: bool = True def __init__( self, @@ -586,27 +588,25 @@ def __init__( code should always be 2 for DICOM. """ - self._is_little_endian = parent_hdr is None or parent_hdr.endianness == '<' + if code != 2: + raise ValueError(f'code must be 2 for DICOM. 
Got {code}.') + + if content is None: + content = pdcm.Dataset() + + if parent_hdr is not None: + self._is_little_endian = parent_hdr.endianness == '<' - bytes_content: bytes if isinstance(content, pdcm.dataset.Dataset): - self._is_implicit_VR = False - self._object = content - bytes_content = self._mangle(content) + super().__init__(code, object=content) elif isinstance(content, bytes): # Got a byte string - unmangle it self._is_implicit_VR = self._guess_implicit_VR(content) - self._object = self._unmangle(content) - bytes_content = content - elif content is None: # initialize a new dicom dataset - self._is_implicit_VR = False - self._object = pdcm.dataset.Dataset() - bytes_content = self._mangle(self._object) + super().__init__(code, content=content) else: raise TypeError( f'content must be either a bytestring or a pydicom Dataset. ' f'Got {content.__class__}' ) - super().__init__(code, bytes_content) @staticmethod def _guess_implicit_VR(content) -> bool: From 1936d246835ac1fdf207ebe329f4880559fb8de9 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 11 Jun 2024 22:32:37 -0400 Subject: [PATCH 623/702] TEST: Test NiftiJSONExtension --- nibabel/tests/test_nifti1.py | 51 ++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 23e71c8324..79f1c84d68 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -30,6 +30,7 @@ Nifti1Image, Nifti1Pair, Nifti1PairHeader, + NiftiJSONExtension, data_type_codes, extension_codes, load, @@ -1414,6 +1415,56 @@ def test_nifti_dicom_extension(): Nifti1DicomExtension(2, 0) +def test_json_extension(tmp_path): + nim = load(image_file) + hdr = nim.header + exts_container = hdr.extensions + + # Test basic functionality + json_ext = NiftiJSONExtension('ignore', b'{"key": "value"}') + assert json_ext.get_content() == {'key': 'value'} + byte_content = json_ext._mangle(json_ext.get_content()) + assert byte_content == b'{"key": "value"}' + json_obj = json_ext._unmangle(byte_content) + assert json_obj == {'key': 'value'} + size = 16 * ((len(byte_content) + 7) // 16 + 1) + assert json_ext.get_sizeondisk() == size + + def ext_to_bytes(ext, byteswap=False): + bio = BytesIO() + ext.write_to(bio, byteswap) + return bio.getvalue() + + # Check serialization + bytestring = ext_to_bytes(json_ext) + assert bytestring[:8] == struct.pack('<2I', size, extension_codes['ignore']) + assert bytestring[8:].startswith(byte_content) + assert len(bytestring) == size + + # Save to file and read back + exts_container.append(json_ext) + nim.to_filename(tmp_path / 'test.nii') + + # We used ignore, so it comes back as a Nifti1Extension + rt_img = Nifti1Image.from_filename(tmp_path / 'test.nii') + assert len(rt_img.header.extensions) == 3 + rt_ext = rt_img.header.extensions[-1] + assert rt_ext.get_code() == extension_codes['ignore'] + assert rt_ext.get_content() == byte_content + + # MRS is currently the only JSON extension + json_ext._code = extension_codes['mrs'] + nim.to_filename(tmp_path / 'test.nii') + + # Check that the extension is read back as a NiftiJSONExtension + rt_img = Nifti1Image.from_filename(tmp_path / 'test.nii') + assert len(rt_img.header.extensions) == 3 + rt_ext = rt_img.header.extensions[-1] + assert rt_ext.get_code() == extension_codes['mrs'] + assert isinstance(rt_ext, NiftiJSONExtension) + assert rt_ext.get_content() == json_obj + + class TestNifti1General: """Test class to test nifti1 in general From 061fbf566673296cb7c10007c62c02297139f334 Mon Sep 
17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 7 Sep 2024 19:49:46 -0400 Subject: [PATCH 624/702] feat: Add current extension codes --- nibabel/nifti1.py | 15 +++++++++++ nibabel/tests/test_nifti1.py | 51 ------------------------------------ 2 files changed, 15 insertions(+), 51 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index da890a63ac..31fed2e63c 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -652,6 +652,21 @@ def _mangle(self, dataset: DicomDataset) -> bytes: (12, 'workflow_fwds', Nifti1Extension), (14, 'freesurfer', Nifti1Extension), (16, 'pypickle', Nifti1Extension), + (18, 'mind_ident', NiftiExtension), + (20, 'b_value', NiftiExtension), + (22, 'spherical_direction', NiftiExtension), + (24, 'dt_component', NiftiExtension), + (26, 'shc_degreeorder', NiftiExtension), + (28, 'voxbo', NiftiExtension), + (30, 'caret', NiftiExtension), + ## Defined in nibabel.cifti2.parse_cifti2 + # (32, 'cifti', Cifti2Extension), + (34, 'variable_frame_timing', NiftiExtension), + (36, 'unassigned', NiftiExtension), + (38, 'eval', NiftiExtension), + (40, 'matlab', NiftiExtension), + (42, 'quantiphyse', NiftiExtension), + (44, 'mrs', NiftiExtension[ty.Dict[str, ty.Any]]), ), fields=('code', 'label', 'handler'), ) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 79f1c84d68..23e71c8324 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -30,7 +30,6 @@ Nifti1Image, Nifti1Pair, Nifti1PairHeader, - NiftiJSONExtension, data_type_codes, extension_codes, load, @@ -1415,56 +1414,6 @@ def test_nifti_dicom_extension(): Nifti1DicomExtension(2, 0) -def test_json_extension(tmp_path): - nim = load(image_file) - hdr = nim.header - exts_container = hdr.extensions - - # Test basic functionality - json_ext = NiftiJSONExtension('ignore', b'{"key": "value"}') - assert json_ext.get_content() == {'key': 'value'} - byte_content = json_ext._mangle(json_ext.get_content()) - assert byte_content == b'{"key": "value"}' - json_obj = json_ext._unmangle(byte_content) - assert json_obj == {'key': 'value'} - size = 16 * ((len(byte_content) + 7) // 16 + 1) - assert json_ext.get_sizeondisk() == size - - def ext_to_bytes(ext, byteswap=False): - bio = BytesIO() - ext.write_to(bio, byteswap) - return bio.getvalue() - - # Check serialization - bytestring = ext_to_bytes(json_ext) - assert bytestring[:8] == struct.pack('<2I', size, extension_codes['ignore']) - assert bytestring[8:].startswith(byte_content) - assert len(bytestring) == size - - # Save to file and read back - exts_container.append(json_ext) - nim.to_filename(tmp_path / 'test.nii') - - # We used ignore, so it comes back as a Nifti1Extension - rt_img = Nifti1Image.from_filename(tmp_path / 'test.nii') - assert len(rt_img.header.extensions) == 3 - rt_ext = rt_img.header.extensions[-1] - assert rt_ext.get_code() == extension_codes['ignore'] - assert rt_ext.get_content() == byte_content - - # MRS is currently the only JSON extension - json_ext._code = extension_codes['mrs'] - nim.to_filename(tmp_path / 'test.nii') - - # Check that the extension is read back as a NiftiJSONExtension - rt_img = Nifti1Image.from_filename(tmp_path / 'test.nii') - assert len(rt_img.header.extensions) == 3 - rt_ext = rt_img.header.extensions[-1] - assert rt_ext.get_code() == extension_codes['mrs'] - assert isinstance(rt_ext, NiftiJSONExtension) - assert rt_ext.get_content() == json_obj - - class TestNifti1General: """Test class to test nifti1 in general From 72a93c2d3d43cbf39faa633b972152bd6b23e139 Mon Sep 17 00:00:00 
2001 From: Chris Markiewicz Date: Sat, 7 Sep 2024 20:21:27 -0400 Subject: [PATCH 625/702] Update nibabel/nifti1.py --- nibabel/nifti1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 31fed2e63c..a22959dfd6 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -555,7 +555,7 @@ class Nifti1DicomExtension(Nifti1Extension[DicomDataset]): """ code = 2 - _is_implict_VR: bool = False + _is_implicit_VR: bool = False _is_little_endian: bool = True def __init__( From bb978c1c3dab40fc5fb12876059df526c85d33ad Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 8 Sep 2024 02:16:22 -0400 Subject: [PATCH 626/702] fix: Import from typing in Python 3.13 --- nibabel/nifti1.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index a22959dfd6..ee6cec53a7 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -14,13 +14,18 @@ from __future__ import annotations import json +import sys import typing as ty import warnings from io import BytesIO import numpy as np import numpy.linalg as npl -from typing_extensions import Self, TypeVar # PY312 + +if sys.version_info <= (3, 12): + from typing_extensions import Self, TypeVar # PY312 +else: + from typing import Self, TypeVar from . import analyze # module import from .arrayproxy import get_obj_dtype From 398488ec600d01a432f46a2d2e94523245b897f9 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 8 Sep 2024 02:42:24 -0400 Subject: [PATCH 627/702] Update nibabel/nifti1.py --- nibabel/nifti1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index ee6cec53a7..626d217527 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -22,7 +22,7 @@ import numpy as np import numpy.linalg as npl -if sys.version_info <= (3, 12): +if sys.version_info < (3, 13): from typing_extensions import Self, TypeVar # PY312 else: from typing import Self, TypeVar From 4d09e33b530bc7dab87d0492db2bc1489795318c Mon Sep 17 00:00:00 2001 From: Guillaume Becq Date: Tue, 10 Sep 2024 19:00:07 +0200 Subject: [PATCH 628/702] Add files via upload --- nibabel/viewers.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 0dc2f0dafc..07881eb695 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -103,7 +103,7 @@ def __init__(self, data, affine=None, axes=None, title=None): # | | | | # | | | | # +---------+ +---------+ - # A --> <-- R + # A --> R --> # ^ +---------+ +---------+ # | | | | | # | Axial | | Vol | @@ -111,7 +111,7 @@ def __init__(self, data, affine=None, axes=None, title=None): # | | | | # | | | | # +---------+ +---------+ - # <-- R <-- t --> + # R --> <-- t --> fig, axes = plt.subplots(2, 2) fig.set_size_inches((8, 8), forward=True) @@ -419,7 +419,7 @@ def _set_position(self, x, y, z, notify=True): # deal with crosshairs loc = self._data_idx[ii] if self._flips[ii]: - loc = self._sizes[ii] - loc + loc = self._sizes[ii] - 1 - loc loc = [loc] * 2 if ii == 0: self._crosshairs[2]['vert'].set_xdata(loc) @@ -468,12 +468,17 @@ def _on_scroll(self, event): dv *= 1.0 if event.button == 'up' else -1.0 dv *= -1 if self._flips[ii] else 1 val = self._data_idx[ii] + dv + if ii == 3: self._set_volume_index(val) else: - coords = [self._data_idx[k] for k in range(3)] + [1.0] + coords = [self._data_idx[k] for k in range(3)] coords[ii] = val - self._set_position(*np.dot(self._affine, coords)[:3]) + coords_ordered = 
[0, 0, 0, 1] + for k in range(3): + coords_ordered[self._order[k]] = coords[k] + position = np.dot(self._affine, coords_ordered)[:3] + self._set_position(*position) self._draw() def _on_mouse(self, event): @@ -488,18 +493,19 @@ def _on_mouse(self, event): self._set_volume_index(event.xdata) else: # translate click xdata/ydata to physical position - xax, yax = [[1, 2], [0, 2], [0, 1]][ii] + xax, yax = [[self._order[1], self._order[2]], + [self._order[0], self._order[2]], + [self._order[0], self._order[1]]][ii] x, y = event.xdata, event.ydata - x = self._sizes[xax] - x if self._flips[xax] else x - y = self._sizes[yax] - y if self._flips[yax] else y + x = self._sizes[xax] - x - 1 if self._flips[xax] else x + y = self._sizes[yax] - y - 1 if self._flips[yax] else y idxs = np.ones(4) idxs[xax] = x idxs[yax] = y - idxs[ii] = self._data_idx[ii] - idxs[:3] = idxs[self._order] - self._set_position(*np.dot(self._affine, idxs)[:3]) + idxs[self._order[ii]] = self._data_idx[ii] + self._set_position(*np.dot(self._affine, idxs)[:3]) self._draw() - + def _on_keypress(self, event): """Handle mpl keypress events""" if event.key is not None and 'escape' in event.key: From 6bfdcafe31c66d9e1f5e6329e09e3a332cd5c6c0 Mon Sep 17 00:00:00 2001 From: Guillaume Becq Date: Tue, 10 Sep 2024 19:01:42 +0200 Subject: [PATCH 629/702] Add files via upload --- nibabel/tests/test_viewers.py | 200 ++++++++++++++++++++++++++++++++++ 1 file changed, 200 insertions(+) diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py index 72d839c923..dff93926db 100644 --- a/nibabel/tests/test_viewers.py +++ b/nibabel/tests/test_viewers.py @@ -134,3 +134,203 @@ def test_viewer_nonRAS(): assert_array_equal(sag, data1[6, :, :]) assert_array_equal(cor, data1[:, :, 32].T) assert_array_equal(axi, data1[:, 13, :].T) + + + +@needs_mpl +def test_viewer_nonRAS_on_mouse(): + """ + test on_mouse selection on non RAS matrices + + """ + # This affine simulates an acquisition on a quadruped subject that is in a prone position. + # This corresponds to an acquisition with: + # - LR inverted on scanner x (i) + # - IS on scanner y (j) + # - PA on scanner z (k) + # This example enables to test also OrthoSlicer3D properties `_flips` and `_order`. 
+ + (I, J, K) = (10, 20, 40) + data1 = np.random.rand(I, J, K) + (i_target, j_target, k_target) = (2, 14, 12) + i1 = i_target - 2 + i2 = i_target + 2 + j1 = j_target - 3 + j2 = j_target + 3 + k1 = k_target - 4 + k2 = k_target + 4 + data1[i1: i2 + 1, j1: j2 + 1, k1: k2 + 1] = 0 + data1[i_target, j_target, k_target] = 1 + valp1 = 1.5 + valm1 = 0.5 + data1[i_target - 1, j_target, k_target] = valp1 # x flipped + data1[i_target + 1, j_target, k_target] = valm1 # x flipped + data1[i_target, j_target - 1, k_target] = valm1 + data1[i_target, j_target + 1, k_target] = valp1 + data1[i_target, j_target, k_target - 1] = valm1 + data1[i_target, j_target, k_target + 1] = valp1 + + aff1 = np.array([[-1, 0, 0, 5], + [0, 0, 1, -10], + [0, 1, 0, -30], + [0, 0, 0, 1]]) + + o1 = OrthoSlicer3D(data1, aff1) + + class Event: + def __init__(self): + self.name = "simulated mouse event" + self.button = 1 + + event = Event() + event.xdata = k_target + event.ydata = j_target + event.inaxes = o1._ims[0].axes + o1._on_mouse(event) + + event.inaxes = o1._ims[1].axes + event.xdata = (I - 1) - i_target # x flipped + event.ydata = j_target + o1._on_mouse(event) + + event.inaxes = o1._ims[2].axes + event.xdata = (I - 1) - i_target # x flipped + event.ydata = k_target + o1._on_mouse(event) + + sag = o1._ims[0].get_array() + cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + + assert_array_equal(sag, data1[i_target, :, :]) # + assert_array_equal(cor, data1[::-1, :, k_target].T) # x flipped + assert_array_equal(axi, data1[::-1, j_target, :].T) # x flipped + return None + + +@needs_mpl +def test_viewer_nonRAS_on_scroll(): + """ + test scrolling on non RAS matrices + + """ + # This affine simulates an acquisition on a quadruped subject that is in a prone position. + # This corresponds to an acquisition with: + # - LR inverted on scanner x (i) + # - IS on scanner y (j) + # - PA on scanner z (k) + # This example enables to test also OrthoSlicer3D properties `_flips` and `_order`. 
+ + (I, J, K) = (10, 20, 40) + data1 = np.random.rand(I, J, K) + (i_target, j_target, k_target) = (2, 14, 12) + i1 = i_target - 2 + i2 = i_target + 2 + j1 = j_target - 3 + j2 = j_target + 3 + k1 = k_target - 4 + k2 = k_target + 4 + data1[i1: i2 + 1, j1: j2 + 1, k1: k2 + 1] = 0 + data1[i_target, j_target, k_target] = 1 + valp1 = 1.5 + valm1 = 0.5 + data1[i_target - 1, j_target, k_target] = valp1 # x flipped + data1[i_target + 1, j_target, k_target] = valm1 # x flipped + data1[i_target, j_target - 1, k_target] = valm1 + data1[i_target, j_target + 1, k_target] = valp1 + data1[i_target, j_target, k_target - 1] = valm1 + data1[i_target, j_target, k_target + 1] = valp1 + + aff1 = np.array([[-1, 0, 0, 5], + [0, 0, 1, -10], + [0, 1, 0, -30], + [0, 0, 0, 1]]) + + o1 = OrthoSlicer3D(data1, aff1) + + class Event: + def __init__(self): + self.name = "simulated mouse event" + self.button = None + self.key = None + + i_last = data1.shape[0] - 1 + + [x_t, y_t, z_t] = list(aff1.dot(np.array([i_target, j_target, k_target, 1]))[:3]) + # print(x_t, y_t, z_t) + # scanner positions are x_t=3, y_t=2, z_t=16 + + event = Event() + + # Sagittal plane - one scroll up + # x coordinate is flipped so index decrease by 1 + o1.set_position(x_t, y_t, z_t) + event.inaxes = o1._ims[0].axes + event.button = 'up' + o1._on_scroll(event) + sag = o1._ims[0].get_array() + cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + assert_array_equal(sag, data1[i_target - 1, :, :]) + assert_array_equal(cor, data1[::-1, :, k_target].T) # ::-1 because the array is flipped in x + assert_array_equal(axi, data1[::-1, j_target, :].T) # ::-1 because the array is flipped in x + + # Sagittal plane - one scrolled down + o1.set_position(x_t, y_t, z_t) + event.button = 'down' + o1._on_scroll(event) + sag = o1._ims[0].get_array() + cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + assert_array_equal(sag, data1[i_target + 1, :, :]) + assert_array_equal(cor, data1[::-1, :, k_target].T) + assert_array_equal(axi, data1[::-1, j_target, :].T) + + # Coronal plane - one scroll up + # y coordinate is increase by 1 + o1.set_position(x_t, y_t, z_t) + event.inaxes = o1._ims[1].axes + event.button = 'up' + o1._on_scroll(event) + sag = o1._ims[0].get_array() + cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + assert_array_equal(sag, data1[i_target, :, :]) + assert_array_equal(cor, data1[::-1, :, k_target + 1].T) # ::-1 because the array is flipped in x + assert_array_equal(axi, data1[::-1, j_target, :].T) # ::-1 because the array is flipped in x + + # Coronal plane - one scrolled down + o1.set_position(x_t, y_t, z_t) + event.button = 'down' + o1._on_scroll(event) + sag = o1._ims[0].get_array() + cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + assert_array_equal(sag, data1[i_target, :, :]) + assert_array_equal(cor, data1[::-1, :, k_target - 1].T) + assert_array_equal(axi, data1[::-1, j_target, :].T) + + # Axial plane - one scroll up + # y is increase by 1 + o1.set_position(x_t, y_t, z_t) + event.inaxes = o1._ims[2].axes + event.button = 'up' + o1._on_scroll(event) + sag = o1._ims[0].get_array() + cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + assert_array_equal(sag, data1[i_target, :, :]) + assert_array_equal(cor, data1[::-1, :, k_target].T) # ::-1 because the array is flipped in x + assert_array_equal(axi, data1[::-1, j_target + 1, :].T) # ::-1 because the array is flipped in x + + # Axial plane - one scrolled down + o1.set_position(x_t, y_t, z_t) + event.button = 'down' + o1._on_scroll(event) + sag = 
o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+    assert_array_equal(sag, data1[i_target, :, :])
+    assert_array_equal(cor, data1[::-1, :, k_target].T)
+    assert_array_equal(axi, data1[::-1, j_target - 1, :].T)
+    return None \ No newline at end of file
From e1f28f382db87b5859e3e9dedeb9171a4b5d8621 Mon Sep 17 00:00:00 2001
From: Guillaume Becq
Date: Fri, 13 Sep 2024 13:59:54 +0200
Subject: [PATCH 630/702] BF: correct `_on_mouse` and `_on_scroll` for non-RAS matrices

---
 nibabel/viewers.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/nibabel/viewers.py b/nibabel/viewers.py
index 07881eb695..5181ace7bb 100644
--- a/nibabel/viewers.py
+++ b/nibabel/viewers.py
@@ -468,14 +468,14 @@ def _on_scroll(self, event):
             dv *= 1.0 if event.button == 'up' else -1.0
             dv *= -1 if self._flips[ii] else 1
             val = self._data_idx[ii] + dv
-             
+
         if ii == 3:
             self._set_volume_index(val)
         else:
             coords = [self._data_idx[k] for k in range(3)]
             coords[ii] = val
             coords_ordered = [0, 0, 0, 1]
-            for k in range(3): 
+            for k in range(3):
                 coords_ordered[self._order[k]] = coords[k]
             position = np.dot(self._affine, coords_ordered)[:3]
             self._set_position(*position)
@@ -493,9 +493,11 @@ def _on_mouse(self, event):
             self._set_volume_index(event.xdata)
         else:
             # translate click xdata/ydata to physical position
-            xax, yax = [[self._order[1], self._order[2]],
-                        [self._order[0], self._order[2]],
-                        [self._order[0], self._order[1]]][ii]
+            xax, yax = [
+                [self._order[1], self._order[2]],
+                [self._order[0], self._order[2]],
+                [self._order[0], self._order[1]],
+            ][ii]
             x, y = event.xdata, event.ydata
             x = self._sizes[xax] - x - 1 if self._flips[xax] else x
             y = self._sizes[yax] - y - 1 if self._flips[yax] else y
@@ -503,9 +505,9 @@ def _on_mouse(self, event):
             idxs[xax] = x
             idxs[yax] = y
             idxs[self._order[ii]] = self._data_idx[ii]
-            self._set_position(*np.dot(self._affine, idxs)[:3]) 
+            self._set_position(*np.dot(self._affine, idxs)[:3])
         self._draw()
-     
+
     def _on_keypress(self, event):
         """Handle mpl keypress events"""
         if event.key is not None and 'escape' in event.key:
From 5d89d2fb6c9056dba8d15bbca1445eaa467a6eb6 Mon Sep 17 00:00:00 2001
From: Guillaume Becq
Date: Fri, 13 Sep 2024 14:00:55 +0200
Subject: [PATCH 631/702] TEST: viewers bugfix for non-RAS matrices

---
 nibabel/tests/test_viewers.py | 133 ++++++++++++++++------------------
 1 file changed, 64 insertions(+), 69 deletions(-)

diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py
index dff93926db..fa22d9021a 100644
--- a/nibabel/tests/test_viewers.py
+++ b/nibabel/tests/test_viewers.py
@@ -136,20 +136,19 @@ def test_viewer_nonRAS():
     assert_array_equal(axi, data1[:, 13, :].T)


-
 @needs_mpl
 def test_viewer_nonRAS_on_mouse():
     """
     test on_mouse selection on non RAS matrices
-     
+
     """
-    # This affine simulates an acquisition on a quadruped subject that is in a prone position. 
-    # This corresponds to an acquisition with: 
+    # This affine simulates an acquisition on a quadruped subject that is in a prone position.
+    # This corresponds to an acquisition with:
    # - LR inverted on scanner x (i)
    # - IS on scanner y (j)
    # - PA on scanner z (k)
-    # This example enables to test also OrthoSlicer3D properties `_flips` and `_order`. 
+ (I, J, K) = (10, 20, 40) data1 = np.random.rand(I, J, K) (i_target, j_target, k_target) = (2, 14, 12) @@ -159,52 +158,49 @@ def test_viewer_nonRAS_on_mouse(): j2 = j_target + 3 k1 = k_target - 4 k2 = k_target + 4 - data1[i1: i2 + 1, j1: j2 + 1, k1: k2 + 1] = 0 + data1[i1 : i2 + 1, j1 : j2 + 1, k1 : k2 + 1] = 0 data1[i_target, j_target, k_target] = 1 valp1 = 1.5 valm1 = 0.5 - data1[i_target - 1, j_target, k_target] = valp1 # x flipped - data1[i_target + 1, j_target, k_target] = valm1 # x flipped + data1[i_target - 1, j_target, k_target] = valp1 # x flipped + data1[i_target + 1, j_target, k_target] = valm1 # x flipped data1[i_target, j_target - 1, k_target] = valm1 data1[i_target, j_target + 1, k_target] = valp1 data1[i_target, j_target, k_target - 1] = valm1 data1[i_target, j_target, k_target + 1] = valp1 - - aff1 = np.array([[-1, 0, 0, 5], - [0, 0, 1, -10], - [0, 1, 0, -30], - [0, 0, 0, 1]]) - + + aff1 = np.array([[-1, 0, 0, 5], [0, 0, 1, -10], [0, 1, 0, -30], [0, 0, 0, 1]]) + o1 = OrthoSlicer3D(data1, aff1) - - class Event: - def __init__(self): - self.name = "simulated mouse event" + + class Event: + def __init__(self): + self.name = 'simulated mouse event' self.button = 1 - + event = Event() event.xdata = k_target event.ydata = j_target event.inaxes = o1._ims[0].axes o1._on_mouse(event) - + event.inaxes = o1._ims[1].axes - event.xdata = (I - 1) - i_target # x flipped + event.xdata = (I - 1) - i_target # x flipped event.ydata = j_target o1._on_mouse(event) - + event.inaxes = o1._ims[2].axes - event.xdata = (I - 1) - i_target # x flipped + event.xdata = (I - 1) - i_target # x flipped event.ydata = k_target o1._on_mouse(event) - + sag = o1._ims[0].get_array() cor = o1._ims[1].get_array() axi = o1._ims[2].get_array() - - assert_array_equal(sag, data1[i_target, :, :]) # - assert_array_equal(cor, data1[::-1, :, k_target].T) # x flipped - assert_array_equal(axi, data1[::-1, j_target, :].T) # x flipped + + assert_array_equal(sag, data1[i_target, :, :]) # + assert_array_equal(cor, data1[::-1, :, k_target].T) # x flipped + assert_array_equal(axi, data1[::-1, j_target, :].T) # x flipped return None @@ -212,15 +208,15 @@ def __init__(self): def test_viewer_nonRAS_on_scroll(): """ test scrolling on non RAS matrices - + """ - # This affine simulates an acquisition on a quadruped subject that is in a prone position. - # This corresponds to an acquisition with: + # This affine simulates an acquisition on a quadruped subject that is in a prone position. + # This corresponds to an acquisition with: # - LR inverted on scanner x (i) # - IS on scanner y (j) # - PA on scanner z (k) # This example enables to test also OrthoSlicer3D properties `_flips` and `_order`. 
- + (I, J, K) = (10, 20, 40) data1 = np.random.rand(I, J, K) (i_target, j_target, k_target) = (2, 14, 12) @@ -230,40 +226,35 @@ def test_viewer_nonRAS_on_scroll(): j2 = j_target + 3 k1 = k_target - 4 k2 = k_target + 4 - data1[i1: i2 + 1, j1: j2 + 1, k1: k2 + 1] = 0 + data1[i1 : i2 + 1, j1 : j2 + 1, k1 : k2 + 1] = 0 data1[i_target, j_target, k_target] = 1 valp1 = 1.5 valm1 = 0.5 - data1[i_target - 1, j_target, k_target] = valp1 # x flipped - data1[i_target + 1, j_target, k_target] = valm1 # x flipped + data1[i_target - 1, j_target, k_target] = valp1 # x flipped + data1[i_target + 1, j_target, k_target] = valm1 # x flipped data1[i_target, j_target - 1, k_target] = valm1 data1[i_target, j_target + 1, k_target] = valp1 data1[i_target, j_target, k_target - 1] = valm1 data1[i_target, j_target, k_target + 1] = valp1 - - aff1 = np.array([[-1, 0, 0, 5], - [0, 0, 1, -10], - [0, 1, 0, -30], - [0, 0, 0, 1]]) - + + aff1 = np.array([[-1, 0, 0, 5], [0, 0, 1, -10], [0, 1, 0, -30], [0, 0, 0, 1]]) + o1 = OrthoSlicer3D(data1, aff1) - - class Event: - def __init__(self): - self.name = "simulated mouse event" + + class Event: + def __init__(self): + self.name = 'simulated mouse event' self.button = None self.key = None - - i_last = data1.shape[0] - 1 - + [x_t, y_t, z_t] = list(aff1.dot(np.array([i_target, j_target, k_target, 1]))[:3]) # print(x_t, y_t, z_t) # scanner positions are x_t=3, y_t=2, z_t=16 - + event = Event() - + # Sagittal plane - one scroll up - # x coordinate is flipped so index decrease by 1 + # x coordinate is flipped so index decrease by 1 o1.set_position(x_t, y_t, z_t) event.inaxes = o1._ims[0].axes event.button = 'up' @@ -272,10 +263,10 @@ def __init__(self): cor = o1._ims[1].get_array() axi = o1._ims[2].get_array() assert_array_equal(sag, data1[i_target - 1, :, :]) - assert_array_equal(cor, data1[::-1, :, k_target].T) # ::-1 because the array is flipped in x - assert_array_equal(axi, data1[::-1, j_target, :].T) # ::-1 because the array is flipped in x - - # Sagittal plane - one scrolled down + assert_array_equal(cor, data1[::-1, :, k_target].T) # ::-1 because the array is flipped in x + assert_array_equal(axi, data1[::-1, j_target, :].T) # ::-1 because the array is flipped in x + + # Sagittal plane - one scrolled down o1.set_position(x_t, y_t, z_t) event.button = 'down' o1._on_scroll(event) @@ -285,9 +276,9 @@ def __init__(self): assert_array_equal(sag, data1[i_target + 1, :, :]) assert_array_equal(cor, data1[::-1, :, k_target].T) assert_array_equal(axi, data1[::-1, j_target, :].T) - + # Coronal plane - one scroll up - # y coordinate is increase by 1 + # y coordinate is increase by 1 o1.set_position(x_t, y_t, z_t) event.inaxes = o1._ims[1].axes event.button = 'up' @@ -296,10 +287,12 @@ def __init__(self): cor = o1._ims[1].get_array() axi = o1._ims[2].get_array() assert_array_equal(sag, data1[i_target, :, :]) - assert_array_equal(cor, data1[::-1, :, k_target + 1].T) # ::-1 because the array is flipped in x - assert_array_equal(axi, data1[::-1, j_target, :].T) # ::-1 because the array is flipped in x - - # Coronal plane - one scrolled down + assert_array_equal( + cor, data1[::-1, :, k_target + 1].T + ) # ::-1 because the array is flipped in x + assert_array_equal(axi, data1[::-1, j_target, :].T) # ::-1 because the array is flipped in x + + # Coronal plane - one scrolled down o1.set_position(x_t, y_t, z_t) event.button = 'down' o1._on_scroll(event) @@ -309,9 +302,9 @@ def __init__(self): assert_array_equal(sag, data1[i_target, :, :]) assert_array_equal(cor, data1[::-1, :, k_target - 1].T) 
assert_array_equal(axi, data1[::-1, j_target, :].T)
-     
+
     # Axial plane - one scroll up
-    # y is increase by 1 
+    # y is increase by 1
     o1.set_position(x_t, y_t, z_t)
     event.inaxes = o1._ims[2].axes
     event.button = 'up'
@@ -320,10 +313,12 @@ def __init__(self):
     cor = o1._ims[1].get_array()
     axi = o1._ims[2].get_array()
     assert_array_equal(sag, data1[i_target, :, :])
-    assert_array_equal(cor, data1[::-1, :, k_target].T) # ::-1 because the array is flipped in x 
-    assert_array_equal(axi, data1[::-1, j_target + 1, :].T) # ::-1 because the array is flipped in x 
-    
-    # Axial plane - one scrolled down 
+    assert_array_equal(cor, data1[::-1, :, k_target].T)  # ::-1 because the array is flipped in x
+    assert_array_equal(
+        axi, data1[::-1, j_target + 1, :].T
+    )  # ::-1 because the array is flipped in x
+
+    # Axial plane - one scrolled down
     o1.set_position(x_t, y_t, z_t)
     event.button = 'down'
     o1._on_scroll(event)
@@ -333,4 +328,4 @@ def __init__(self):
     assert_array_equal(sag, data1[i_target, :, :])
     assert_array_equal(cor, data1[::-1, :, k_target].T)
     assert_array_equal(axi, data1[::-1, j_target - 1, :].T)
-    return None \ No newline at end of file
+    return None
From 9aaacfa6ee7ad548a83e2a8349d4c1b36078fe14 Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Sun, 22 Sep 2024 17:12:06 +0200
Subject: [PATCH 632/702] STY: Apply ruff/pyupgrade rule UP006

UP006 Use `type` instead of `Type` for type annotation
UP006 Use `tuple` instead of `ty.Tuple` for type annotation

---
 nibabel/gifti/gifti.py   | 4 ++--
 nibabel/spatialimages.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py
index 7c5c3c4fb0..caee7c3500 100644
--- a/nibabel/gifti/gifti.py
+++ b/nibabel/gifti/gifti.py
@@ -18,7 +18,7 @@
 import sys
 import warnings
 from copy import copy
-from typing import Type, cast
+from typing import cast

 import numpy as np

@@ -598,7 +598,7 @@ class GiftiImage(xml.XmlSerializable, SerializableImage):
     # The parser will in due course be a GiftiImageParser, but we can't set
     # that now, because it would result in a circular import. We set it after
     # the class has been defined, at the end of the class definition.
-    parser: Type[xml.XmlParser]
+    parser: type[xml.XmlParser]

     def __init__(
         self,
diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py
index 96f8115a22..f4d27791b2 100644
--- a/nibabel/spatialimages.py
+++ b/nibabel/spatialimages.py
@@ -169,8 +169,8 @@ def set_data_dtype(self, dtype: npt.DTypeLike) -> None: ...
 @ty.runtime_checkable
 class SpatialProtocol(ty.Protocol):
     def get_data_dtype(self) -> np.dtype: ...
-    def get_data_shape(self) -> ty.Tuple[int, ...]: ...
-    def get_zooms(self) -> ty.Tuple[float, ...]: ...
+    def get_data_shape(self) -> tuple[int, ...]: ...
+    def get_zooms(self) -> tuple[float, ...]: ...
class HeaderDataError(Exception): From 4c784a700578d69792724deea24f1633a9942b85 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:13:09 +0200 Subject: [PATCH 633/702] STY: Apply ruff/pyupgrade rule UP031 UP031 Use format specifiers instead of percent format --- nibabel/analyze.py | 4 +++- nibabel/cmdline/diff.py | 2 +- nibabel/cmdline/ls.py | 2 +- nibabel/dft.py | 2 +- nibabel/freesurfer/mghformat.py | 4 +++- nibabel/nifti1.py | 2 +- nibabel/tests/test_data.py | 2 +- nibabel/tests/test_nifti1.py | 2 +- 8 files changed, 12 insertions(+), 8 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index e697181719..34597319d6 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -515,7 +515,9 @@ def data_to_fileobj(self, data, fileobj, rescale=True): data = np.asanyarray(data) shape = self.get_data_shape() if data.shape != shape: - raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape)) + raise HeaderDataError( + 'Data should be shape ({})'.format(', '.join(str(s) for s in shape)) + ) out_dtype = self.get_data_dtype() if rescale: try: diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 1231a778f4..36760f7ebb 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -302,7 +302,7 @@ def display_diff(files, diff): for item in value: if isinstance(item, dict): - item_str = ', '.join('%s: %s' % i for i in item.items()) + item_str = ', '.join('{}: {}'.format(*i) for i in item.items()) elif item is None: item_str = '-' else: diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index ff41afbd0a..f79c27f0c5 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -112,7 +112,7 @@ def proc_file(f, opts): and (h.has_data_slope or h.has_data_intercept) and not h.get_slope_inter() in ((1.0, 0.0), (None, None)) ): - row += ['@l*%.3g+%.3g' % h.get_slope_inter()] + row += ['@l*{:.3g}+{:.3g}'.format(*h.get_slope_inter())] else: row += [''] diff --git a/nibabel/dft.py b/nibabel/dft.py index d9e3359998..e63c9c4796 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -231,7 +231,7 @@ def __getattribute__(self, name): WHERE storage_instance = ? 
ORDER BY directory, name""" c.execute(query, (self.uid,)) - val = ['%s/%s' % tuple(row) for row in c] + val = ['{}/{}'.format(*tuple(row)) for row in c] self.files = val return val diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 533d235927..6efa67ffa8 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -570,7 +570,9 @@ def _write_data(self, mghfile, data, header): """ shape = header.get_data_shape() if data.shape != shape: - raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape)) + raise HeaderDataError( + 'Data should be shape ({})'.format(', '.join(str(s) for s in shape)) + ) offset = header.get_data_offset() out_dtype = header.get_data_dtype() array_to_file(data, mghfile, out_dtype, offset) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index ecd94c10de..4788947315 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -552,7 +552,7 @@ def get_sizeondisk(self): return np.sum([e.get_sizeondisk() for e in self]) def __repr__(self): - return 'Nifti1Extensions(%s)' % ', '.join(str(e) for e in self) + return 'Nifti1Extensions({})'.format(', '.join(str(e) for e in self)) def write_to(self, fileobj, byteswap): """Write header extensions to fileobj diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index 5697752ea4..511fa7f857 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -160,7 +160,7 @@ def test_data_path(with_nimd_env): tmpfile = pjoin(tmpdir, 'another_example.ini') with open(tmpfile, 'w') as fobj: fobj.write('[DATA]\n') - fobj.write('path = %s\n' % '/path/two') + fobj.write('path = {}\n'.format('/path/two')) assert get_data_path() == tst_list + ['/path/two'] + old_pth diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 5ee4fb3c15..819a270811 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -538,7 +538,7 @@ def test_slice_times(self): hdr.set_slice_duration(0.1) # We need a function to print out the Nones and floating point # values in a predictable way, for the tests below. - _stringer = lambda val: val is not None and '%2.1f' % val or None + _stringer = lambda val: val is not None and '{:2.1f}'.format(val) or None _print_me = lambda s: list(map(_stringer, s)) # The following examples are from the nifti1.h documentation. 
hdr['slice_code'] = slice_order_codes['sequential increasing'] From fe7d97c49faac5e2946dc320096d8a3f0d856e9f Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:13:50 +0200 Subject: [PATCH 634/702] STY: Apply ruff/pyupgrade rule UP032 UP032 Use f-string instead of `format` call --- nibabel/cmdline/dicomfs.py | 4 +--- nibabel/tests/test_nifti1.py | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index 552bb09319..afd994b151 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -193,9 +193,7 @@ def release(self, path, flags, fh): def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage='{} [OPTIONS] '.format( - os.path.basename(sys.argv[0]) - ), + usage=f'{os.path.basename(sys.argv[0])} [OPTIONS] ', version='%prog ' + nib.__version__, ) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 819a270811..5a04958587 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -538,7 +538,7 @@ def test_slice_times(self): hdr.set_slice_duration(0.1) # We need a function to print out the Nones and floating point # values in a predictable way, for the tests below. - _stringer = lambda val: val is not None and '{:2.1f}'.format(val) or None + _stringer = lambda val: val is not None and f'{val:2.1f}' or None _print_me = lambda s: list(map(_stringer, s)) # The following examples are from the nifti1.h documentation. hdr['slice_code'] = slice_order_codes['sequential increasing'] From bf3e23e1d91ed68ea4b8eadba19bfc57ecc893ce Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:24:06 +0200 Subject: [PATCH 635/702] STY: Enforce ruff/pyupgrade rules (UP) --- pyproject.toml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ff5168f9c6..2840119c4f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -115,7 +115,12 @@ line-length = 99 exclude = ["doc", "nibabel/externals", "tools", "version.py", "versioneer.py"] [tool.ruff.lint] -select = ["F", "I", "Q"] +select = [ + "F", + "I", + "Q", + "UP", +] ignore = [ # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", From 7a23f67eb431330a7aca17a0eca9d4bae7be6d8e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:19:42 +0200 Subject: [PATCH 636/702] STY: Apply ruff/Pylint rule PLE0101 PLE0101 Explicit return in `__init__` --- nibabel/openers.py | 2 +- nibabel/tmpdirs.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/openers.py b/nibabel/openers.py index c3fa9a4783..9a306d4e47 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -68,7 +68,7 @@ def __init__( raise TypeError('Must define either fileobj or filename') # Cast because GzipFile.myfileobj has type io.FileIO while open returns ty.IO fileobj = self.myfileobj = ty.cast(io.FileIO, open(filename, modestr)) - return super().__init__( + super().__init__( filename='', mode=modestr, compresslevel=compresslevel, diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 9d67f6acb7..2bcf9fdeba 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -54,7 +54,7 @@ def __init__(self, suffix='', prefix=tempfile.template, dir=None): >>> os.path.exists(tmpdir) False """ - return super().__init__(suffix, 
prefix, dir) + super().__init__(suffix, prefix, dir) @contextmanager From 747338cd86ea958de1f1b45e7d5d87ebe7d1a222 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:53:36 +0200 Subject: [PATCH 637/702] STY: Enforce ruff/Pylint rules, errors only (PLE) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 2840119c4f..915ea9b815 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -118,6 +118,7 @@ exclude = ["doc", "nibabel/externals", "tools", "version.py", "versioneer.py"] select = [ "F", "I", + "PLE", "Q", "UP", ] From 930cc28a306d211e09228ca1ebef8966586b17e2 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:22:45 +0200 Subject: [PATCH 638/702] STY: Apply ruff/flake8-raise rule RSE102 RSE102 Unnecessary parentheses on raised exception --- nibabel/streamlines/tractogram_file.py | 6 +++--- nibabel/tests/test_volumeutils.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/streamlines/tractogram_file.py b/nibabel/streamlines/tractogram_file.py index 557261e9a0..65add3e2f2 100644 --- a/nibabel/streamlines/tractogram_file.py +++ b/nibabel/streamlines/tractogram_file.py @@ -74,7 +74,7 @@ def is_correct_format(cls, fileobj): Returns True if `fileobj` is in the right streamlines file format, otherwise returns False. """ - raise NotImplementedError() + raise NotImplementedError @classmethod def create_empty_header(cls): @@ -101,7 +101,7 @@ def load(cls, fileobj, lazy_load=True): Returns an object containing tractogram data and header information. """ - raise NotImplementedError() + raise NotImplementedError @abstractmethod def save(self, fileobj): @@ -113,4 +113,4 @@ def save(self, fileobj): If string, a filename; otherwise an open file-like object opened and ready to write. """ - raise NotImplementedError() + raise NotImplementedError diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 07ca9a6baa..9d321f07e4 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -989,7 +989,7 @@ def test_seek_tell_logic(): class BabyBio(BytesIO): def seek(self, *args): - raise OSError() + raise OSError bio = BabyBio() # Fresh fileobj, position 0, can't seek - error From 47df196256e67a248abf664d84c681c54f0bd784 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:47:06 +0200 Subject: [PATCH 639/702] STY: Enforce ruff/flake8-raise rules (RSE) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 2840119c4f..55e96d992c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -119,6 +119,7 @@ select = [ "F", "I", "Q", + "RSE", "UP", ] ignore = [ From aa1315277b5f2b8ff9cfda4e16b7ab98a57eecf4 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:05:30 +0200 Subject: [PATCH 640/702] STY: Apply ruff/flake8-bugbear rule B009 B009 Do not call `getattr` with a constant attribute value. It is not any safer than normal property access. 
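For illustration, a hypothetical snippet (not part of this diff) showing
the pattern B009 flags and the equivalent direct access:

    class Config:
        name = 'nibabel'

    cfg = Config()
    # getattr with a constant string does the same lookup as plain access
    assert getattr(cfg, 'name') == cfg.name  # B009 flags the getattr form
    value = cfg.name  # preferred spelling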
--- nibabel/tests/conftest.py | 2 +- nibabel/viewers.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/conftest.py b/nibabel/tests/conftest.py index 3cf54a34c5..fb13708450 100644 --- a/nibabel/tests/conftest.py +++ b/nibabel/tests/conftest.py @@ -6,7 +6,7 @@ # Generate dynamic fixtures def pytest_generate_tests(metafunc): if 'supported_dtype' in metafunc.fixturenames: - if metafunc.cls is None or not getattr(metafunc.cls, 'image_class'): + if metafunc.cls is None or not metafunc.cls.image_class: raise pytest.UsageError( 'Attempting to use supported_dtype fixture outside an image test case' ) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 0dc2f0dafc..4dd8a1c258 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -447,7 +447,7 @@ def _set_position(self, x, y, z, notify=True): # Matplotlib handlers #################################################### def _in_axis(self, event): """Return axis index if within one of our axes, else None""" - if getattr(event, 'inaxes') is None: + if event.inaxes is None: return None for ii, ax in enumerate(self._axes): if event.inaxes is ax: From d6ea77beed3db1361c04165a054f4081cf9b8dd8 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:07:35 +0200 Subject: [PATCH 641/702] STY: Apply ruff/flake8-bugbear rule B015 B015 Pointless comparison. Did you mean to assign a value? Otherwise, prepend `assert` or remove it. --- nibabel/gifti/tests/test_parse_gifti_fast.py | 4 ++-- nibabel/tests/test_openers.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index 8cb7c96794..6ca54df038 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -241,7 +241,7 @@ def test_load_dataarray1(): me = img.darrays[0].meta assert 'AnatomicalStructurePrimary' in me assert 'AnatomicalStructureSecondary' in me - me['AnatomicalStructurePrimary'] == 'CortexLeft' + assert me['AnatomicalStructurePrimary'] == 'CortexLeft' assert_array_almost_equal(img.darrays[0].coordsys.xform, np.eye(4, 4)) assert xform_codes.niistring[img.darrays[0].coordsys.dataspace] == 'NIFTI_XFORM_TALAIRACH' assert xform_codes.niistring[img.darrays[0].coordsys.xformspace] == 'NIFTI_XFORM_TALAIRACH' @@ -279,7 +279,7 @@ def test_load_dataarray4(): def test_dataarray5(): img5 = load(DATA_FILE5) for da in img5.darrays: - gifti_endian_codes.byteorder[da.endian] == 'little' + assert gifti_endian_codes.byteorder[da.endian] == 'little' assert_array_almost_equal(img5.darrays[0].data, DATA_FILE5_darr1) assert_array_almost_equal(img5.darrays[1].data, DATA_FILE5_darr2) # Round trip tested below diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 15290d5ef9..0b58794331 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -431,17 +431,17 @@ def test_DeterministicGzipFile_fileobj(): with open('test.gz', 'wb') as fobj: with DeterministicGzipFile(filename='', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - md5sum('test.gz') == ref_chksum + assert md5sum('test.gz') == ref_chksum with open('test.gz', 'wb') as fobj: with DeterministicGzipFile(fileobj=fobj, mode='wb') as gzobj: gzobj.write(msg) - md5sum('test.gz') == ref_chksum + assert md5sum('test.gz') == ref_chksum with open('test.gz', 'wb') as fobj: with DeterministicGzipFile(filename='test.gz', mode='wb', fileobj=fobj) 
as gzobj: gzobj.write(msg) - md5sum('test.gz') == ref_chksum + assert md5sum('test.gz') == ref_chksum def test_bitwise_determinism(): From f064b62e8045a60065b9a6ac48670a4def46af38 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:31:14 +0200 Subject: [PATCH 642/702] STY: Enforce ruff/flake8-bugbear rules (B) --- pyproject.toml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 2840119c4f..ead2782b23 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -116,12 +116,23 @@ exclude = ["doc", "nibabel/externals", "tools", "version.py", "versioneer.py"] [tool.ruff.lint] select = [ + "B", "F", "I", "Q", "UP", ] ignore = [ + "B006", # TODO: enable + "B008", # TODO: enable + "B007", + "B011", + "B017", # TODO: enable + "B018", + "B020", + "B023", # TODO: enable + "B028", + "B904", # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", "E111", From 16754c8f828a75dc916c25b82ae9ca150e7cd686 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:58:30 +0200 Subject: [PATCH 643/702] STY: Apply ruff/flake8-comprehensions rule C406 C406 Unnecessary `list` literal (rewrite as a `dict` literal) --- nibabel/cifti2/tests/test_cifti2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index 895b8f9597..1d9d5097c0 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -37,7 +37,7 @@ def test_cifti2_metadata(): assert len(md) == 1 assert list(iter(md)) == ['a'] assert md['a'] == 'aval' - assert md.data == dict([('a', 'aval')]) + assert md.data == {'a': 'aval'} with pytest.warns(FutureWarning): md = ci.Cifti2MetaData(metadata={'a': 'aval'}) @@ -57,7 +57,7 @@ def test_cifti2_metadata(): md['a'] = 'aval' assert md['a'] == 'aval' assert len(md) == 1 - assert md.data == dict([('a', 'aval')]) + assert md.data == {'a': 'aval'} del md['a'] assert len(md) == 0 From 9e007ece3aedff5e9518ba6e9ab95395bdabcfb6 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:59:16 +0200 Subject: [PATCH 644/702] STY: Apply ruff/flake8-comprehensions rule C413 C413 Unnecessary `list` call around `sorted()` --- nibabel/cifti2/tests/test_cifti2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index 1d9d5097c0..6382dab9d6 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -392,7 +392,7 @@ def test_matrix(): m[0] = mim_1 assert list(m.mapped_indices) == [1] m.insert(0, mim_0) - assert list(sorted(m.mapped_indices)) == [0, 1] + assert sorted(m.mapped_indices) == [0, 1] assert h.number_of_mapped_indices == 2 assert h.get_index_map(0) == mim_0 assert h.get_index_map(1) == mim_1 From a826ccdb634bd78f8ed08ad289269751acb20d53 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:00:45 +0200 Subject: [PATCH 645/702] STY: Apply ruff/flake8-comprehensions rule C416 C416 Unnecessary `dict` comprehension (rewrite using `dict()`) --- nibabel/brikhead.py | 2 +- nibabel/nicom/dicomwrappers.py | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 
da8692efd3..d187a6b34b 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -198,7 +198,7 @@ def parse_AFNI_header(fobj): return parse_AFNI_header(src) # unpack variables in HEAD file head = fobj.read().split('\n\n') - return {key: value for key, value in map(_unpack_var, head)} + return dict(map(_unpack_var, head)) class AFNIArrayProxy(ArrayProxy): diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 3842248fd5..009880e496 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -685,9 +685,7 @@ def __init__(self, dcm_data, frame_filters=None): frame_slc_pos = [np.inner(ipp, self.slice_normal) for ipp in frame_ipps] rnd_slc_pos = np.round(frame_slc_pos, 4) uniq_slc_pos = np.unique(rnd_slc_pos) - pos_ord_map = { - val: order for val, order in zip(uniq_slc_pos, np.argsort(uniq_slc_pos)) - } + pos_ord_map = dict(zip(uniq_slc_pos, np.argsort(uniq_slc_pos))) self._frame_slc_ord = [pos_ord_map[pos] for pos in rnd_slc_pos] if len(self._frame_slc_ord) > 1: self._slice_spacing = ( From 102bbf7f750f443f6e13aee04bbffc764a67e6d4 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:01:44 +0200 Subject: [PATCH 646/702] STY: Apply ruff/flake8-comprehensions rule C419 C419 Unnecessary list comprehension --- nibabel/casting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 31e27d0e8c..042a2f415d 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -764,7 +764,7 @@ def able_int_type(values): >>> able_int_type([-1, 1]) == np.int8 True """ - if any([v % 1 for v in values]): + if any(v % 1 for v in values): return None mn = min(values) mx = max(values) From a28ce642ea707d0456579411f33049e8e2e0a9ab Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:36:57 +0200 Subject: [PATCH 647/702] STY: Enforce ruff/flake8-comprehensions rules (C4) --- pyproject.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index f7d116ea92..becc93366d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -117,6 +117,7 @@ exclude = ["doc", "nibabel/externals", "tools", "version.py", "versioneer.py"] [tool.ruff.lint] select = [ "B", + "C4", "F", "I", "PLE", @@ -135,6 +136,9 @@ ignore = [ "B023", # TODO: enable "B028", "B904", + "C401", + "C408", + "C416", # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", "E111", From 2cfabbd30632b6e2231f061c997dc5be20611984 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:15:24 +0200 Subject: [PATCH 648/702] STY: Apply ruff/flake8-type-checking rule TCH001 TCH001 Move application import into a type-checking block --- nibabel/dataobj_images.py | 4 ++-- nibabel/imageclasses.py | 8 ++++++-- nibabel/spatialimages.py | 5 +++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 6850599014..565a228794 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -14,14 +14,14 @@ import numpy as np -from .arrayproxy import ArrayLike from .deprecated import deprecate_with_version from .filebasedimages import FileBasedHeader, FileBasedImage -from .fileholders import FileMap if ty.TYPE_CHECKING: import numpy.typing as npt + from .arrayproxy import ArrayLike + from .fileholders import FileMap from 
.filename_parser import FileSpec ArrayImgT = ty.TypeVar('ArrayImgT', bound='DataobjImage') diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index 20cf1cac9c..66f984e268 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -10,11 +10,11 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from .analyze import AnalyzeImage from .brikhead import AFNIImage from .cifti2 import Cifti2Image -from .dataobj_images import DataobjImage -from .filebasedimages import FileBasedImage from .freesurfer import MGHImage from .gifti import GiftiImage from .minc1 import Minc1Image @@ -25,6 +25,10 @@ from .spm2analyze import Spm2AnalyzeImage from .spm99analyze import Spm99AnalyzeImage +if TYPE_CHECKING: + from .dataobj_images import DataobjImage + from .filebasedimages import FileBasedImage + # Ordered by the load/save priority. all_image_classes: list[type[FileBasedImage]] = [ Nifti1Pair, diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index f4d27791b2..bd5ff8c11b 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -139,11 +139,9 @@ import numpy as np -from .arrayproxy import ArrayLike from .casting import sctypes_aliases from .dataobj_images import DataobjImage from .filebasedimages import FileBasedHeader, FileBasedImage -from .fileholders import FileMap from .fileslice import canonical_slicers from .orientations import apply_orientation, inv_ornt_aff from .viewers import OrthoSlicer3D @@ -157,6 +155,9 @@ if ty.TYPE_CHECKING: import numpy.typing as npt + from .arrayproxy import ArrayLike + from .fileholders import FileMap + SpatialImgT = ty.TypeVar('SpatialImgT', bound='SpatialImage') SpatialHdrT = ty.TypeVar('SpatialHdrT', bound='SpatialHeader') From bb221918bb1c644d4e944fb3219d18cf7ad82fc3 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:16:18 +0200 Subject: [PATCH 649/702] STY: Apply ruff/flake8-type-checking rule TCH002 TCH002 Move third-party import into a type-checking block --- nibabel/testing/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 992ef2ead4..f41c657f5f 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -26,12 +26,14 @@ from .helpers import assert_data_similar, bytesio_filemap, bytesio_round_trip from .np_features import memmap_after_ufunc +if ty.TYPE_CHECKING: + from importlib_resources.abc import Traversable + try: from importlib.resources import as_file, files from importlib.resources.abc import Traversable except ImportError: # PY38 from importlib_resources import as_file, files - from importlib_resources.abc import Traversable def get_test_data( From 0a27464e27682b48de188e4bf4e97b91c0c8fdd8 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:19:08 +0200 Subject: [PATCH 650/702] STY: Apply ruff/flake8-type-checking rule TCH003 TCH003 Move standard library import into a type-checking block --- nibabel/_compression.py | 3 ++- nibabel/fileholders.py | 4 +++- nibabel/optpkg.py | 4 +++- nibabel/spatialimages.py | 5 +++-- nibabel/testing/__init__.py | 3 +-- nibabel/volumeutils.py | 3 ++- 6 files changed, 14 insertions(+), 8 deletions(-) diff --git a/nibabel/_compression.py b/nibabel/_compression.py index f697fa54cc..871be2629f 100644 --- a/nibabel/_compression.py +++ b/nibabel/_compression.py @@ -12,12 +12,13 @@ import bz2 
import gzip -import io import typing as ty from .optpkg import optional_package if ty.TYPE_CHECKING: + import io + import indexed_gzip # type: ignore[import] import pyzstd diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index 3db4c62a9e..df7c34af63 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -10,12 +10,14 @@ from __future__ import annotations -import io import typing as ty from copy import copy from .openers import ImageOpener +if ty.TYPE_CHECKING: + import io + class FileHolderError(Exception): pass diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index bfe6a629cc..90b8ded518 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -3,12 +3,14 @@ from __future__ import annotations import typing as ty -from types import ModuleType from packaging.version import Version from .tripwire import TripWire +if ty.TYPE_CHECKING: + from types import ModuleType + def _check_pkg_version(min_version: str | Version) -> ty.Callable[[ModuleType], bool]: min_ver = Version(min_version) if isinstance(min_version, str) else min_version diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index bd5ff8c11b..ce8ee3c6e6 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -132,9 +132,7 @@ from __future__ import annotations -import io import typing as ty -from collections.abc import Sequence from typing import Literal import numpy as np @@ -153,6 +151,9 @@ from functools import lru_cache as cache if ty.TYPE_CHECKING: + import io + from collections.abc import Sequence + import numpy.typing as npt from .arrayproxy import ArrayLike diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index f41c657f5f..be111747b2 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -27,11 +27,10 @@ from .np_features import memmap_after_ufunc if ty.TYPE_CHECKING: - from importlib_resources.abc import Traversable + from importlib.resources.abc import Traversable try: from importlib.resources import as_file, files - from importlib.resources.abc import Traversable except ImportError: # PY38 from importlib_resources import as_file, files diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index c2387f0949..6e43f79186 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -10,7 +10,6 @@ from __future__ import annotations -import io import sys import typing as ty import warnings @@ -25,6 +24,8 @@ from .externals.oset import OrderedSet if ty.TYPE_CHECKING: + import io + import numpy.typing as npt Scalar = np.number | float From 8ca899aa43c0b690dec4a04a44a723da831463d8 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:51:49 +0200 Subject: [PATCH 651/702] STY: Enforce ruff/flake8-type-checking rules (TCH) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index f7d116ea92..d45c4e19fd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -122,6 +122,7 @@ select = [ "PLE", "Q", "RSE", + "TCH", "UP", ] ignore = [ From 7af724bf5294257b315424297f0c9154259aaf92 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:52:04 +0200 Subject: [PATCH 652/702] STY: Apply ruff/flake8-pie rule PIE807 PIE807 Prefer `list` over useless lambda --- nibabel/streamlines/tests/test_tractogram.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/streamlines/tests/test_tractogram.py 
b/nibabel/streamlines/tests/test_tractogram.py index 9159688548..72b84fac6e 100644 --- a/nibabel/streamlines/tests/test_tractogram.py +++ b/nibabel/streamlines/tests/test_tractogram.py @@ -49,8 +49,8 @@ def make_fake_tractogram( ): """Make multiple streamlines according to provided requirements.""" all_streamlines = [] - all_data_per_point = defaultdict(lambda: []) - all_data_per_streamline = defaultdict(lambda: []) + all_data_per_point = defaultdict(list) + all_data_per_streamline = defaultdict(list) for nb_points in list_nb_points: data = make_fake_streamline( nb_points, data_per_point_shapes, data_for_streamline_shapes, rng From b4fb300525adacf7167b07ccf89a04232e72c866 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:52:54 +0200 Subject: [PATCH 653/702] STY: Apply ruff/flake8-pie rule PIE808 PIE808 Unnecessary `start` argument in `range` --- nibabel/ecat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 34ff06323c..c4b55624f9 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -957,7 +957,7 @@ def to_file_map(self, file_map=None): hdr.write_to(hdrf) # Write every frames - for index in range(0, self.header['num_frames']): + for index in range(self.header['num_frames']): # Move to subheader offset frame_offset = subheaders._get_frame_offset(index) - 512 imgf.seek(frame_offset) From 576b74bd1ef5d0373cfe5d17bc8ce06f366bc9c0 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:39:54 +0200 Subject: [PATCH 654/702] STY: Enforce ruff/flake8-pie rules (PIE) --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index becc93366d..7f416c13ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -120,6 +120,7 @@ select = [ "C4", "F", "I", + "PIE", "PLE", "Q", "RSE", @@ -139,6 +140,7 @@ ignore = [ "C401", "C408", "C416", + "PIE790", # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", "E111", From d53b64cee8ed919ad24ba40657eb1ea37833e364 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 15:44:48 +0200 Subject: [PATCH 655/702] STY: Apply ruff/refurb rule FURB167 FURB167 Use of regular expression alias --- nibabel/nicom/ascconv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index 8ec72fb3ec..6d72436039 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -10,7 +10,7 @@ ASCCONV_RE = re.compile( r'### ASCCONV BEGIN((?:\s*[^=\s]+=[^=\s]+)*) ###\n(.*?)\n### ASCCONV END ###', - flags=re.M | re.S, + flags=re.MULTILINE | re.DOTALL, ) From 1abcdec867c54c3c58e2d8a7c0215a128e2c9f69 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:51:25 +0200 Subject: [PATCH 656/702] STY: Enforce ruff/refurb rules (FURB) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index becc93366d..316abdecad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -119,6 +119,7 @@ select = [ "B", "C4", "F", + "FURB", "I", "PLE", "Q", From 5cc97c6ab4746589fac78d84a7d5341c20f70cd1 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:49:32 +0200 Subject: [PATCH 657/702] STY: Apply 
ruff/flake8-pyi rule PYI034 PYI034 `__enter__` methods usually return `self` at runtime --- nibabel/openers.py | 3 ++- tox.ini | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nibabel/openers.py b/nibabel/openers.py index 9a306d4e47..35b10c20a4 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -22,6 +22,7 @@ from types import TracebackType from _typeshed import WriteableBuffer + from typing_extensions import Self ModeRT = ty.Literal['r', 'rt'] ModeRB = ty.Literal['rb'] @@ -246,7 +247,7 @@ def close_if_mine(self) -> None: if self.me_opened: self.close() - def __enter__(self) -> Opener: + def __enter__(self) -> Self: return self def __exit__( diff --git a/tox.ini b/tox.ini index 5df35c8d38..675526f944 100644 --- a/tox.ini +++ b/tox.ini @@ -181,6 +181,7 @@ deps = numpy pyzstd importlib_resources + typing_extensions skip_install = true commands = mypy nibabel From df862cce6f9c90536aa0b44337822d64ce792326 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:41:32 +0200 Subject: [PATCH 658/702] STY: Enforce ruff/flake8-pyi rules (PYI) --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index fa3f881162..0dd49c847d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -123,6 +123,7 @@ select = [ "I", "PIE", "PLE", + "PYI", "Q", "RSE", "TCH", @@ -143,6 +144,7 @@ ignore = [ "C408", "C416", "PIE790", + "PYI024", # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", "E111", From 38fa63868dc6180641f806a063bfa54d85dcd33e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:07:42 +0200 Subject: [PATCH 659/702] STY: Apply ruff/flynt rule FLY002 FLY002 Consider f-string instead of string join --- nibabel/batteryrunners.py | 2 +- nibabel/gifti/tests/test_gifti.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index 30727f3962..860b9b993c 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -252,7 +252,7 @@ def __str__(self): def message(self): """formatted message string, including fix message if present""" if self.fix_msg: - return '; '.join((self.problem_msg, self.fix_msg)) + return f'{self.problem_msg}; {self.fix_msg}' return self.problem_msg def log_raise(self, logger, error_level=40): diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 1cead0d928..97c929ac4c 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -423,13 +423,13 @@ def test_gifti_coord(capsys): gcs.xform = None gcs.print_summary() captured = capsys.readouterr() - assert captured.out == '\n'.join( - [ - 'Dataspace: NIFTI_XFORM_UNKNOWN', - 'XFormSpace: NIFTI_XFORM_UNKNOWN', - 'Affine Transformation Matrix: ', - ' None\n', - ] + assert ( + captured.out + == """Dataspace: NIFTI_XFORM_UNKNOWN +XFormSpace: NIFTI_XFORM_UNKNOWN +Affine Transformation Matrix: + None + """ ) gcs.to_xml() From 1c8010bc3d51c031a393558192aa99b30782cc06 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:57:58 +0200 Subject: [PATCH 660/702] STY: Enforce ruff/flynt rules (FLY) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 0dd49c847d..3e2ffa0b43 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ 
-119,6 +119,7 @@ select = [ "B", "C4", "F", + "FLY", "FURB", "I", "PIE", From 27baa683961cdfd42153d368c79ee3ea32ef4ab2 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 23 Sep 2024 09:34:11 -0400 Subject: [PATCH 661/702] sty: Remove unnecessary trailing whitespace in summary --- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/tests/test_gifti.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index caee7c3500..c983a14dfd 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -374,7 +374,7 @@ def _to_xml_element(self): def print_summary(self): print('Dataspace: ', xform_codes.niistring[self.dataspace]) print('XFormSpace: ', xform_codes.niistring[self.xformspace]) - print('Affine Transformation Matrix: \n', self.xform) + print('Affine Transformation Matrix:\n', self.xform) def _data_tag_element(dataarray, encoding, dtype, ordering): diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 97c929ac4c..416faf3c84 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -425,11 +425,12 @@ def test_gifti_coord(capsys): captured = capsys.readouterr() assert ( captured.out - == """Dataspace: NIFTI_XFORM_UNKNOWN + == """\ +Dataspace: NIFTI_XFORM_UNKNOWN XFormSpace: NIFTI_XFORM_UNKNOWN -Affine Transformation Matrix: +Affine Transformation Matrix: None - """ +""" ) gcs.to_xml() From aeb7a8d2a627afc450618ae844101e1f8dfb98ce Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 15:56:07 +0200 Subject: [PATCH 662/702] STY: Apply ruff/Perflint rule PERF102 PERF102 When using only the keys of a dict use the `keys()` method PERF102 When using only the values of a dict use the `values()` method --- nibabel/streamlines/tests/test_streamlines.py | 8 ++++---- nibabel/testing/helpers.py | 2 +- nibabel/tests/test_analyze.py | 8 ++++---- nibabel/tests/test_files_interface.py | 4 ++-- nibabel/tests/test_nifti1.py | 2 +- nibabel/tests/test_spm99analyze.py | 4 ++-- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index 857e64fec9..740b4c2616 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -207,7 +207,7 @@ def test_save_tractogram_file(self): def test_save_empty_file(self): tractogram = Tractogram(affine_to_rasmm=np.eye(4)) - for ext, cls in FORMATS.items(): + for ext in FORMATS: with InTemporaryDirectory(): filename = 'streamlines' + ext nib.streamlines.save(tractogram, filename) @@ -216,7 +216,7 @@ def test_save_empty_file(self): def test_save_simple_file(self): tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) - for ext, cls in FORMATS.items(): + for ext in FORMATS: with InTemporaryDirectory(): filename = 'streamlines' + ext nib.streamlines.save(tractogram, filename) @@ -262,7 +262,7 @@ def test_save_complex_file(self): def test_save_sliced_tractogram(self): tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) original_tractogram = tractogram.copy() - for ext, cls in FORMATS.items(): + for ext in FORMATS: with InTemporaryDirectory(): filename = 'streamlines' + ext nib.streamlines.save(tractogram[::2], filename) @@ -283,7 +283,7 @@ def test_save_from_generator(self): tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) # Just to create a generator - for 
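print() joins its arguments with a single space, so the trailing space
before '\n' left 'Matrix: ' at the end of the first summary line, while
the matrix printed on the next line still gets one leading space from the
argument separator. A rough sketch of the fixed call (hypothetical
stand-in value, not part of this diff):

    print('Affine Transformation Matrix:\n', None)
    # Affine Transformation Matrix:
    #  None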
ext, _ in FORMATS.items(): + for ext in FORMATS: filtered = (s for s in tractogram.streamlines if True) lazy_tractogram = LazyTractogram(lambda: filtered, affine_to_rasmm=np.eye(4)) diff --git a/nibabel/testing/helpers.py b/nibabel/testing/helpers.py index ae859d6572..ad4bf258cd 100644 --- a/nibabel/testing/helpers.py +++ b/nibabel/testing/helpers.py @@ -14,7 +14,7 @@ def bytesio_filemap(klass): """Return bytes io filemap for this image class `klass`""" file_map = klass.make_file_map() - for name, fileholder in file_map.items(): + for fileholder in file_map.values(): fileholder.fileobj = BytesIO() fileholder.pos = 0 return file_map diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index cb7b8d686d..d3c6211bfc 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -728,7 +728,7 @@ def test_data_hdr_cache(self): IC = self.image_class # save an image to a file map fm = IC.make_file_map() - for key, value in fm.items(): + for key in fm: fm[key].fileobj = BytesIO() shape = (2, 3, 4) data = np.arange(24, dtype=np.int8).reshape(shape) @@ -831,7 +831,7 @@ def test_header_updating(self): hdr = img.header hdr.set_zooms((4, 5, 6)) # Save / reload using bytes IO objects - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() hdr_back = img.from_file_map(img.file_map).header @@ -842,7 +842,7 @@ def test_header_updating(self): assert_array_equal(hdr.get_zooms(), (2, 3, 4)) # Modify affine in-place? Update on save. img.affine[0, 0] = 9 - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() hdr_back = img.from_file_map(img.file_map).header @@ -864,7 +864,7 @@ def test_pickle(self): assert_array_equal(img.get_fdata(), img2.get_fdata()) assert img.header == img2.header # Save / reload using bytes IO objects - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() img_prox = img.from_file_map(img.file_map) diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 07e394eca4..b3562b6083 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -28,7 +28,7 @@ def test_files_spatialimages(): ] for klass in klasses: file_map = klass.make_file_map() - for key, value in file_map.items(): + for value in file_map.values(): assert value.filename is None assert value.fileobj is None assert value.pos == 0 @@ -41,7 +41,7 @@ def test_files_spatialimages(): img = klass(arr.astype(np.float32), aff) else: img = klass(arr, aff) - for key, value in img.file_map.items(): + for value in img.file_map.values(): assert value.filename is None assert value.fileobj is None assert value.pos == 0 diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index ec4b8674eb..52e38fded2 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -820,7 +820,7 @@ def _qform_rt(self, img): hdr['qform_code'] = 3 hdr['sform_code'] = 4 # Save / reload using bytes IO objects - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() return img.from_file_map(img.file_map) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index ada92d3b05..26098d8ede 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -423,7 +423,7 @@ def test_mat_read(self): aff = 
np.diag([2, 3, 4, 1]) # no LR flip in affine img = img_klass(arr, aff) fm = img.file_map - for key, value in fm.items(): + for value in fm.values(): value.fileobj = BytesIO() # Test round trip img.to_file_map() @@ -475,7 +475,7 @@ def test_none_affine(self): img = img_klass(np.zeros((2, 3, 4)), None) aff = img.header.get_best_affine() # Save / reload using bytes IO objects - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() img_back = img.from_file_map(img.file_map) From d6b6c3b1590d9644217923ab7cb1708eb8c694da Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:01:28 +0200 Subject: [PATCH 663/702] STY: Apply ruff/Perflint rule PERF401 PERF401 Use a list comprehension to create a transformed list --- nibabel/data.py | 3 +-- nibabel/nicom/tests/test_dicomwrappers.py | 7 +++---- nibabel/tests/test_euler.py | 6 +----- nibabel/tests/test_filehandles.py | 3 +-- 4 files changed, 6 insertions(+), 13 deletions(-) diff --git a/nibabel/data.py b/nibabel/data.py index c49580d09b..8ea056d8e7 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -87,8 +87,7 @@ def list_files(self, relative=True): for base, dirs, files in os.walk(self.base_path): if relative: base = base[len(self.base_path) + 1 :] - for filename in files: - out_list.append(pjoin(base, filename)) + out_list.extend(pjoin(base, filename) for filename in files) return out_list diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 55c27df50a..db3f667518 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -429,10 +429,9 @@ def fake_shape_dependents( class PrintBase: def __repr__(self): - attr_strs = [] - for attr in dir(self): - if attr[0].isupper(): - attr_strs.append(f'{attr}={getattr(self, attr)}') + attr_strs = [ + f'{attr}={getattr(self, attr)}' for attr in dir(self) if attr[0].isupper() + ] return f"{self.__class__.__name__}({', '.join(attr_strs)})" class DimIdxSeqElem(pydicom.Dataset): diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index b0c965c399..3cc07e8f5d 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -21,12 +21,8 @@ FLOAT_EPS = np.finfo(np.float64).eps # Example rotations """ -eg_rots = [] params = np.arange(-pi * 2, pi * 2.5, pi / 2) -for x in params: - for y in params: - for z in params: - eg_rots.append((x, y, z)) +eg_rots = [(x, y, z) for x in params for y in params for z in params] def x_only(x): diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index 506a623758..93eb284dfb 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -33,8 +33,7 @@ def test_multiload(): tmpdir = mkdtemp() fname = pjoin(tmpdir, 'test.img') save(img, fname) - for i in range(N): - imgs.append(load(fname)) + imgs.extend(load(fname) for i in range(N)) finally: del img, imgs shutil.rmtree(tmpdir) From 0a5af04eb6e68946f0310dcedb3be36d79233655 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:49:52 +0200 Subject: [PATCH 664/702] STY: Enforce ruff/Perflint rules (PERF) --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index f45532e81f..0706e08764 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -122,6 +122,7 @@ select = [ "F", "FURB", "I", + 
"PERF", "PIE", "PLE", "PYI", @@ -144,6 +145,7 @@ ignore = [ "C401", "C408", "C416", + "PERF203", "PIE790", "PYI024", # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules From 326addc5d98968a50f9cec8f58b8110557e448c0 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 24 Sep 2024 22:03:49 +0200 Subject: [PATCH 665/702] STY: Consistency Co-authored-by: Chris Markiewicz --- nibabel/tests/test_analyze.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index d3c6211bfc..befc920f1e 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -728,8 +728,8 @@ def test_data_hdr_cache(self): IC = self.image_class # save an image to a file map fm = IC.make_file_map() - for key in fm: - fm[key].fileobj = BytesIO() + for value in fm.values(): + value.fileobj = BytesIO() shape = (2, 3, 4) data = np.arange(24, dtype=np.int8).reshape(shape) affine = np.eye(4) From 74c853f5d9afa19f97ccf529b83763b852ae5e55 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 24 Sep 2024 22:04:46 +0200 Subject: [PATCH 666/702] STY: Prefix unused loop control variable with an underscore Co-authored-by: Chris Markiewicz --- nibabel/tests/test_filehandles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index 93eb284dfb..c985d35440 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -33,7 +33,7 @@ def test_multiload(): tmpdir = mkdtemp() fname = pjoin(tmpdir, 'test.img') save(img, fname) - imgs.extend(load(fname) for i in range(N)) + imgs.extend(load(fname) for _ in range(N)) finally: del img, imgs shutil.rmtree(tmpdir) From 25321329674bfde4ba45189ca67519a3a3e1246f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 24 Sep 2024 19:56:00 -0400 Subject: [PATCH 667/702] sty: Apply UP007, UP012 This is safe since we use from __future__ import annotations. 
--- nibabel/nifti1.py | 14 +++++++------- nibabel/tests/test_nifti1.py | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 3ad0ec9389..180f67cca4 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -325,15 +325,15 @@ class NiftiExtension(ty.Generic[T]): """ code: int - encoding: ty.Optional[str] = None + encoding: str | None = None _content: bytes - _object: ty.Optional[T] = None + _object: T | None = None def __init__( self, - code: ty.Union[int, str], + code: int | str, content: bytes = b'', - object: ty.Optional[T] = None, + object: T | None = None, ) -> None: """ Parameters @@ -565,9 +565,9 @@ class Nifti1DicomExtension(Nifti1Extension[DicomDataset]): def __init__( self, - code: ty.Union[int, str], - content: ty.Union[bytes, DicomDataset, None] = None, - parent_hdr: ty.Optional[Nifti1Header] = None, + code: int | str, + content: bytes | DicomDataset | None = None, + parent_hdr: Nifti1Header | None = None, ) -> None: """ Parameters diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index ec4b8674eb..a3626f5688 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -1239,7 +1239,7 @@ def test_extension_content_access(): assert ext.text == '123' # Test that encoding errors are caught - ascii_ext = Nifti1Extension('comment', 'hôpital'.encode('utf-8')) + ascii_ext = Nifti1Extension('comment', 'hôpital'.encode()) ascii_ext.encoding = 'ascii' with pytest.raises(UnicodeDecodeError): ascii_ext.text From f4646182fe16e6af965b575a90499e28d6840f9a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:30:48 +0200 Subject: [PATCH 668/702] STY: Apply ruff/flake8-pytest-style rule PT006 PT006 Wrong type passed to first argument of `@pytest.mark.parametrize`; expected `tuple` --- nibabel/cmdline/tests/test_convert.py | 6 +++--- nibabel/cmdline/tests/test_roi.py | 2 +- nibabel/tests/test_euler.py | 4 ++-- nibabel/tests/test_init.py | 2 +- nibabel/tests/test_pkg_info.py | 2 +- nibabel/tests/test_quaternions.py | 10 +++++----- nibabel/tests/test_scaling.py | 6 +++--- nibabel/tests/test_spaces.py | 2 +- nibabel/tests/test_testing.py | 2 +- 9 files changed, 18 insertions(+), 18 deletions(-) diff --git a/nibabel/cmdline/tests/test_convert.py b/nibabel/cmdline/tests/test_convert.py index 021e6ea8ef..d500a717a3 100644 --- a/nibabel/cmdline/tests/test_convert.py +++ b/nibabel/cmdline/tests/test_convert.py @@ -71,7 +71,7 @@ def test_convert_dtype(tmp_path, data_dtype): @pytest.mark.parametrize( - 'ext,img_class', + ('ext', 'img_class'), [ ('mgh', nib.MGHImage), ('img', nib.Nifti1Pair), @@ -94,7 +94,7 @@ def test_convert_by_extension(tmp_path, ext, img_class): @pytest.mark.parametrize( - 'ext,img_class', + ('ext', 'img_class'), [ ('mgh', nib.MGHImage), ('img', nib.Nifti1Pair), @@ -141,7 +141,7 @@ def test_convert_nifti_int_fail(tmp_path): @pytest.mark.parametrize( - 'orig_dtype,alias,expected_dtype', + ('orig_dtype', 'alias', 'expected_dtype'), [ ('int64', 'mask', 'uint8'), ('int64', 'compat', 'int32'), diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index d2baa80eeb..19bdf29011 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -119,7 +119,7 @@ def test_nib_roi(tmp_path, inplace): @pytest.mark.parametrize( - 'args, errmsg', + ('args', 'errmsg'), ( (('-i', '1:1'), 'Cannot take zero-length slice'), (('-j', '1::2'), 'Downsampling is not supported'), diff 
--git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index b0c965c399..1a781b8f14 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -123,7 +123,7 @@ def test_euler_mat_1(): assert_array_equal(M, np.eye(3)) -@pytest.mark.parametrize('x, y, z', eg_rots) +@pytest.mark.parametrize(('x', 'y', 'z'), eg_rots) def test_euler_mat_2(x, y, z): M1 = nea.euler2mat(z, y, x) M2 = sympy_euler(z, y, x) @@ -176,7 +176,7 @@ def test_euler_instability(): assert not np.allclose(M_e, M_e_back) -@pytest.mark.parametrize('x, y, z', eg_rots) +@pytest.mark.parametrize(('x', 'y', 'z'), eg_rots) def test_quats(x, y, z): M1 = nea.euler2mat(z, y, x) quatM = nq.mat2quat(M1) diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index 969b80b6fc..d54f55053b 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -13,7 +13,7 @@ @pytest.mark.parametrize( - 'verbose, v_args', [(-2, ['-qq']), (-1, ['-q']), (0, []), (1, ['-v']), (2, ['-vv'])] + ('verbose', 'v_args'), [(-2, ['-qq']), (-1, ['-q']), (0, []), (1, ['-v']), (2, ['-vv'])] ) @pytest.mark.parametrize('doctests', (True, False)) @pytest.mark.parametrize('coverage', (True, False)) diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index a39eac65b1..94ee903494 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -37,7 +37,7 @@ def test_cmp_pkg_version_0(): @pytest.mark.parametrize( - 'test_ver, pkg_ver, exp_out', + ('test_ver', 'pkg_ver', 'exp_out'), [ ('1.0', '1.0', 0), ('1.0.0', '1.0', 0), diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index ec882dd0b3..a5ec89d948 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -146,7 +146,7 @@ def test_inverse_0(): assert iq.dtype.kind == 'f' -@pytest.mark.parametrize('M, q', eg_pairs) +@pytest.mark.parametrize(('M', 'q'), eg_pairs) def test_inverse_1(M, q): iq = nq.inverse(q) iqM = nq.quat2mat(iq) @@ -169,15 +169,15 @@ def test_norm(): assert not nq.isunit(qi) -@pytest.mark.parametrize('M1, q1', eg_pairs[0::4]) -@pytest.mark.parametrize('M2, q2', eg_pairs[1::4]) +@pytest.mark.parametrize(('M1', 'q1'), eg_pairs[0::4]) +@pytest.mark.parametrize(('M2', 'q2'), eg_pairs[1::4]) def test_mult(M1, q1, M2, q2): # Test that quaternion * same as matrix * q21 = nq.mult(q2, q1) assert_array_almost_equal, M2 @ M1, nq.quat2mat(q21) -@pytest.mark.parametrize('M, q', eg_pairs) +@pytest.mark.parametrize(('M', 'q'), eg_pairs) def test_inverse(M, q): iq = nq.inverse(q) iqM = nq.quat2mat(iq) @@ -186,7 +186,7 @@ def test_inverse(M, q): @pytest.mark.parametrize('vec', np.eye(3)) -@pytest.mark.parametrize('M, q', eg_pairs) +@pytest.mark.parametrize(('M', 'q'), eg_pairs) def test_qrotate(vec, M, q): vdash = nq.rotate_vector(vec, q) vM = M @ vec diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index f667b4164d..eae0b1702c 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -25,7 +25,7 @@ @pytest.mark.parametrize( - 'in_arr, res', + ('in_arr', 'res'), [ ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), @@ -134,7 +134,7 @@ def test_a2f_nan2zero(): @pytest.mark.parametrize( - 'in_type, out_type', + ('in_type', 'out_type'), [ (np.int16, np.int16), (np.int16, np.int8), @@ -163,7 +163,7 @@ def test_array_file_scales(in_type, out_type): @pytest.mark.parametrize( - 'category0, category1, overflow', + ('category0', 'category1', 'overflow'), [ # 
Confirm that, for all ints and uints as input, and all possible outputs, # for any simple way of doing the calculation, the result is near enough diff --git a/nibabel/tests/test_spaces.py b/nibabel/tests/test_spaces.py index f5e467b2cc..4722228a5b 100644 --- a/nibabel/tests/test_spaces.py +++ b/nibabel/tests/test_spaces.py @@ -125,7 +125,7 @@ def test_slice2volume(): @pytest.mark.parametrize( - 'index, axis', + ('index', 'axis'), [ [-1, 0], [0, -1], diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 04ba813d8b..ec147baa95 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -148,7 +148,7 @@ def f(): @pytest.mark.parametrize( - 'regex, entries', + ('regex', 'entries'), [ ['.*', ''], ['.*', ['any']], From bb1c08b44ceb923a850beb86f25576a1e4866c5b Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:35:25 +0200 Subject: [PATCH 669/702] STY: Apply ruff/flake8-pytest-style rule PT014 PT014 Duplicate of test case --- nibabel/tests/test_pkg_info.py | 2 -- nibabel/tests/test_scaling.py | 1 - 2 files changed, 3 deletions(-) diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index 94ee903494..1a9a06dc93 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -54,8 +54,6 @@ def test_cmp_pkg_version_0(): ('1.2.1rc1', '1.2.1', -1), ('1.2.1rc1', '1.2.1rc', 1), ('1.2.1rc', '1.2.1rc1', -1), - ('1.2.1rc1', '1.2.1rc', 1), - ('1.2.1rc', '1.2.1rc1', -1), ('1.2.1b', '1.2.1a', 1), ('1.2.1a', '1.2.1b', -1), ('1.2.0+1', '1.2', 1), diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index eae0b1702c..ccc379c256 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -36,7 +36,6 @@ ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case - ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), ([np.nan], (np.inf, -np.inf)), ([np.inf], (np.inf, -np.inf)), ([-np.inf], (np.inf, -np.inf)), From 30cba2ca39bc02a2da7f7411a178354046fd6cd2 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:37:46 +0200 Subject: [PATCH 670/702] STY: Apply ruff/flake8-pytest-style rule PT015 PT015 Assertion always fails, replace with `pytest.fail()` --- nibabel/cmdline/tests/test_roi.py | 2 +- nibabel/tests/test_removalschedule.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index 19bdf29011..5f538d53f4 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -143,7 +143,7 @@ def test_entrypoint(capsys): except SystemExit: pass else: - assert False, 'argparse exits on --help. If changing to another parser, update test.' + pytest.fail('argparse exits on --help. 
If changing to another parser, update test.') captured = capsys.readouterr() assert captured.out.startswith('usage: nib-roi') diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 7a56f3fb8b..d2bc7da2fc 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -125,7 +125,7 @@ def test_module_removal(): for module in _filter(MODULE_SCHEDULE): with pytest.raises(ImportError): __import__(module) - assert False, f'Time to remove {module}' + raise AssertionError(f'Time to remove {module}') def test_object_removal(): From e4a8d1c9f8e79dbd43b62cbc24dfeeb98abf27b3 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:43:42 +0200 Subject: [PATCH 671/702] STY: Apply ruff/flake8-pytest-style rule PT017 PT017 Found assertion on exception `err` in `except` block, use `pytest.raises()` instead --- nibabel/tests/test_tripwire.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index bcc81b5f5f..6bc4e8533e 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -16,9 +16,6 @@ def test_tripwire(): with pytest.raises(TripWireError): silly_module_name.do_silly_thing # Check AttributeError can be checked too - try: + with pytest.raises(AttributeError) as err: silly_module_name.__wrapped__ - except TripWireError as err: - assert isinstance(err, AttributeError) - else: - raise RuntimeError('No error raised, but expected') + assert isinstance(err.value, AttributeError) From 341d6d79e35f328b4a6ab2ddd3aa2dc8b5416c2e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:45:31 +0200 Subject: [PATCH 672/702] STY: Apply ruff/flake8-pytest-style rule PT022 PT022 No teardown in fixture `db`, use `return` instead of `yield` --- nibabel/tests/test_dft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index 6c6695b16e..6155dda83c 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -58,7 +58,7 @@ def db(monkeypatch): and not modify the host filesystem.""" database = dft._DB(fname=':memory:') monkeypatch.setattr(dft, 'DB', database) - yield database + return database def test_init(db): From bb549fbc84643020b4159d07cf6abcc0fbc34a45 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:46:22 +0200 Subject: [PATCH 673/702] STY: Apply ruff/flake8-pytest-style rule PT027 PT027 Use `pytest.raises` instead of unittest-style `assertRaises` --- nibabel/streamlines/tests/test_streamlines.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index 857e64fec9..359cbc5e1c 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -191,13 +191,13 @@ def test_save_tractogram_file(self): trk_file = trk.TrkFile(tractogram) # No need for keyword arguments. - with self.assertRaises(ValueError): + with pytest.raises(ValueError): nib.streamlines.save(trk_file, 'dummy.trk', header={}) # Wrong extension. 
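# Illustrative aside -- a hedged sketch of the PT027 rewrite using made-up
# values, not code from this hunk; `self.assertRaises` only works inside
# unittest.TestCase subclasses, while `pytest.raises` is a plain context
# manager usable in any test function:
import pytest

def _demo_pt027():
    # before (flagged): with self.assertRaises(ValueError): ...
    with pytest.raises(ValueError):  # after: no TestCase needed
        int('not a number')  # stand-in for any call expected to raise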
with pytest.warns(ExtensionWarning, match='extension'): trk_file = trk.TrkFile(tractogram) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): nib.streamlines.save(trk_file, 'dummy.tck', header={}) with InTemporaryDirectory(): @@ -272,11 +272,11 @@ def test_save_sliced_tractogram(self): assert_tractogram_equal(tractogram, original_tractogram) def test_load_unknown_format(self): - with self.assertRaises(ValueError): + with pytest.raises(ValueError): nib.streamlines.load('') def test_save_unknown_format(self): - with self.assertRaises(ValueError): + with pytest.raises(ValueError): nib.streamlines.save(Tractogram(), '') def test_save_from_generator(self): From a7e1afdb0b292ae7de45bfadb3d9313b9341df70 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:44:19 +0200 Subject: [PATCH 674/702] STY: Enforce ruff/flake8-pytest-style rules (PT) --- pyproject.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index c973d3e0c2..22be5f917f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -125,6 +125,7 @@ select = [ "I", "PIE", "PLE", + "PT", "PYI", "Q", "RSE", @@ -146,6 +147,12 @@ ignore = [ "C408", "C416", "PIE790", + "PT004", + "PT007", + "PT011", + "PT012", + "PT017", + "PT018", "PYI024", # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", From 0e1dee31894e3034031ce0a251c5cfe73da5cdfc Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 24 Sep 2024 22:32:58 +0200 Subject: [PATCH 675/702] MNT: Drop test which verifies that TripWireError is an AttributeError Co-authored-by: Chris Markiewicz --- nibabel/tests/test_tripwire.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index 6bc4e8533e..d7daefe0b1 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -18,4 +18,3 @@ def test_tripwire(): # Check AttributeError can be checked too with pytest.raises(AttributeError) as err: silly_module_name.__wrapped__ - assert isinstance(err.value, AttributeError) From e58e2ea40ed5c9d0d5bf613e86c789ea0689eedb Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 24 Sep 2024 22:34:04 +0200 Subject: [PATCH 676/702] MNT: Simplify try/except/else block Co-authored-by: Chris Markiewicz --- nibabel/cmdline/tests/test_roi.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index 5f538d53f4..4692bbb038 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -138,12 +138,8 @@ def test_nib_roi_bad_slices(capsys, args, errmsg): def test_entrypoint(capsys): # Check that we handle missing args as expected with mock.patch('sys.argv', ['nib-roi', '--help']): - try: + with pytest.raises(SystemExit): main() - except SystemExit: - pass - else: - pytest.fail('argparse exits on --help. 
If changing to another parser, update test.') captured = capsys.readouterr() assert captured.out.startswith('usage: nib-roi') From 35124b7f45604d54fe90753c1f7119bddf9eb997 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:10:42 +0200 Subject: [PATCH 677/702] STY: Apply ruff/pygrep-hooks rule PGH004 PGH004 Do not add spaces between `noqa` and its colon --- nibabel/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/conftest.py b/nibabel/conftest.py index a4f8b6de90..b16a832f28 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -5,7 +5,7 @@ # Ignore warning requesting help with nicom with pytest.warns(UserWarning): - import nibabel.nicom # noqa :401 + import nibabel.nicom # noqa: F401 @pytest.fixture(scope='session', autouse=True) From f31bf2b95f975e5e03e5e50f88b0c65225f733e0 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 16:12:51 +0200 Subject: [PATCH 678/702] STY: Apply ruff/pygrep-hooks rule PGH004 PGH004 Use specific rule codes when using `noqa` --- nibabel/benchmarks/bench_array_to_file.py | 4 ++-- nibabel/benchmarks/bench_finite_range.py | 2 +- nibabel/xmlutils.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index c2bab7e95e..2af8b5677f 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -11,12 +11,12 @@ """ import sys -from io import BytesIO # NOQA +from io import BytesIO # noqa: F401 import numpy as np from numpy.testing import measure -from nibabel.volumeutils import array_to_file # NOQA +from nibabel.volumeutils import array_to_file # noqa: F401 from .butils import print_git_title diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py index edd839ce61..957446884c 100644 --- a/nibabel/benchmarks/bench_finite_range.py +++ b/nibabel/benchmarks/bench_finite_range.py @@ -15,7 +15,7 @@ import numpy as np from numpy.testing import measure -from nibabel.volumeutils import finite_range # NOQA +from nibabel.volumeutils import finite_range # noqa: F401 from .butils import print_git_title diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 5d079e1172..12fd30f225 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -9,7 +9,7 @@ """Thin layer around xml.etree.ElementTree, to abstract nibabel xml support""" from io import BytesIO -from xml.etree.ElementTree import Element, SubElement, tostring # noqa +from xml.etree.ElementTree import Element, SubElement, tostring # noqa: F401 from xml.parsers.expat import ParserCreate from .filebasedimages import FileBasedHeader From aea7fe7be420deaa8c93ea8d7711c7a77214eb92 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:53:46 +0200 Subject: [PATCH 679/702] STY: Enforce ruff/pygrep-hooks rules (PGH) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 748dc12ce1..e865cd0097 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,6 +124,7 @@ select = [ "FURB", "I", "PERF", + "PGH", "PIE", "PLE", "PT", From 50e9231c2257e6bd6773f241e54815a6608d514b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 04:47:31 +0000 Subject: [PATCH 
680/702] Bump deadsnakes/action from 3.1.0 to 3.2.0 Bumps [deadsnakes/action](https://github.com/deadsnakes/action) from 3.1.0 to 3.2.0. - [Release notes](https://github.com/deadsnakes/action/releases) - [Commits](https://github.com/deadsnakes/action/compare/v3.1.0...v3.2.0) --- updated-dependencies: - dependency-name: deadsnakes/action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 05718dc1ff..9e5ddd5162 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -176,7 +176,7 @@ jobs: allow-prereleases: true - name: Set up Python ${{ matrix.python-version }} if: endsWith(matrix.python-version, '-dev') - uses: deadsnakes/action@v3.1.0 + uses: deadsnakes/action@v3.2.0 with: python-version: ${{ matrix.python-version }} nogil: true From afa13e717b8ef355224f2d45dfa834f5df481bf1 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 15:49:34 +0200 Subject: [PATCH 681/702] STY: Apply ruff rule RUF100 RUF100 Unused `noqa` directive --- nibabel/casting.py | 8 ++++---- nibabel/info.py | 2 +- nibabel/parrec.py | 2 -- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/nibabel/casting.py b/nibabel/casting.py index 042a2f415d..b279325477 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -51,11 +51,11 @@ class CastingError(Exception): getattr(np, dtype) for dtype in ( 'int8', 'byte', 'int16', 'short', 'int32', 'intc', 'int_', 'int64', 'longlong', - 'uint8', 'ubyte', 'uint16', 'ushort', 'uint32', 'uintc', 'uint', 'uint64', 'ulonglong', # noqa: E501 - 'float16', 'half', 'float32', 'single', 'float64', 'double', 'float96', 'float128', 'longdouble', # noqa: E501 - 'complex64', 'csingle', 'complex128', 'cdouble', 'complex192', 'complex256', 'clongdouble', # noqa: E501 + 'uint8', 'ubyte', 'uint16', 'ushort', 'uint32', 'uintc', 'uint', 'uint64', 'ulonglong', + 'float16', 'half', 'float32', 'single', 'float64', 'double', 'float96', 'float128', 'longdouble', + 'complex64', 'csingle', 'complex128', 'cdouble', 'complex192', 'complex256', 'clongdouble', # other names of the built-in scalar types - 'int_', 'float_', 'complex_', 'bytes_', 'str_', 'bool_', 'datetime64', 'timedelta64', # noqa: E501 + 'int_', 'float_', 'complex_', 'bytes_', 'str_', 'bool_', 'datetime64', 'timedelta64', # other 'object_', 'void', ) diff --git a/nibabel/info.py b/nibabel/info.py index d7873de211..87727cab13 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -108,4 +108,4 @@ .. _Digital Object Identifier: https://en.wikipedia.org/wiki/Digital_object_identifier .. _zenodo: https://zenodo.org -""" # noqa: E501 +""" diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 8b3ffb34a2..0a2005835f 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -6,8 +6,6 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# Disable line length checking for PAR fragments in module docstring -# noqa: E501 """Read images in PAR/REC format This is yet another MRI image format generated by Philips scanners. 
It is an From 5ea47a7cc1258fe5fc7c2b9cdc0ece9bf8baeaec Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:57:07 +0200 Subject: [PATCH 682/702] STY: Encorce ruff rules (RUF) --- pyproject.toml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e865cd0097..9b5815e332 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -131,18 +131,19 @@ select = [ "PYI", "Q", "RSE", + "RUF", "TCH", "UP", ] ignore = [ - "B006", # TODO: enable - "B008", # TODO: enable + "B006", # TODO: enable + "B008", # TODO: enable "B007", "B011", - "B017", # TODO: enable + "B017", # TODO: enable "B018", "B020", - "B023", # TODO: enable + "B023", # TODO: enable "B028", "B904", "C401", @@ -157,6 +158,10 @@ ignore = [ "PT017", "PT018", "PYI024", + "RUF005", + "RUF012", # TODO: enable + "RUF015", + "RUF017", # TODO: enable # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", "E111", From e52c4c8d338ec588d633ed2cd99a9bc62e14ba93 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:09:49 +0200 Subject: [PATCH 683/702] STY: Disable deprecated ruff rules --- pyproject.toml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e865cd0097..23827a9967 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,14 +135,14 @@ select = [ "UP", ] ignore = [ - "B006", # TODO: enable - "B008", # TODO: enable + "B006", # TODO: enable + "B008", # TODO: enable "B007", "B011", - "B017", # TODO: enable + "B017", # TODO: enable "B018", "B020", - "B023", # TODO: enable + "B023", # TODO: enable "B028", "B904", "C401", @@ -150,13 +150,16 @@ ignore = [ "C416", "PERF203", "PIE790", - "PT004", + "PT004", # deprecated + "PT005", # deprecated "PT007", "PT011", "PT012", "PT017", "PT018", "PYI024", + "UP027", # deprecated + "UP038", # https://github.com/astral-sh/ruff/issues/7871 # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", "E111", From 9f28bc8b0c3e70665a7abdd4fa0fd20ee772acfe Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:47:08 +0200 Subject: [PATCH 684/702] STY: Apply ruff/pyupgrade preview rule UP031 UP031 Use format specifiers instead of percent format Co-authored-by: Chris Markiewicz --- nibabel/analyze.py | 6 +++--- nibabel/cifti2/cifti2_axes.py | 5 ++--- nibabel/cmdline/dicomfs.py | 12 ++++++------ nibabel/cmdline/diff.py | 4 ++-- nibabel/cmdline/ls.py | 10 +++++----- nibabel/dft.py | 2 +- nibabel/ecat.py | 4 ++-- nibabel/fileslice.py | 2 +- nibabel/freesurfer/io.py | 8 ++++---- nibabel/freesurfer/mghformat.py | 2 +- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/parse_gifti_fast.py | 4 ++-- nibabel/nicom/csareader.py | 2 +- nibabel/nicom/dicomreaders.py | 4 ++-- nibabel/nifti1.py | 10 +++++----- nibabel/orientations.py | 2 +- nibabel/spatialimages.py | 2 +- nibabel/tests/test_funcs.py | 2 +- 18 files changed, 41 insertions(+), 42 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 34597319d6..d02363c792 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -699,7 +699,7 @@ def set_zooms(self, zooms): ndim = dims[0] zooms = np.asarray(zooms) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) + raise HeaderDataError(f'Expecting {ndim} zoom values for ndim {ndim}') 
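# Illustrative aside -- a hedged sketch of the UP031 rewrite with a made-up
# value, showing the percent-format and f-string forms produce the same text:
ndim = 3
old_msg = 'Expecting %d zoom values for ndim %d' % (ndim, ndim)  # flagged by UP031
new_msg = f'Expecting {ndim} zoom values for ndim {ndim}'  # preferred f-string
assert old_msg == new_msg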
if np.any(zooms < 0): raise HeaderDataError('zooms must be positive') pixdims = hdr['pixdim'] @@ -818,11 +818,11 @@ def _chk_datatype(klass, hdr, fix=False): dtype = klass._data_type_codes.dtype[code] except KeyError: rep.problem_level = 40 - rep.problem_msg = 'data code %d not recognized' % code + rep.problem_msg = f'data code {code} not recognized' else: if dtype.itemsize == 0: rep.problem_level = 40 - rep.problem_msg = 'data code %d not supported' % code + rep.problem_msg = f'data code {code} not supported' else: return hdr, rep if fix: diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index af7c63beaa..32914be1b6 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -373,7 +373,7 @@ def from_mask(cls, mask, name='other', affine=None): else: raise ValueError( 'Mask should be either 1-dimensional (for surfaces) or ' - '3-dimensional (for volumes), not %i-dimensional' % mask.ndim + f'3-dimensional (for volumes), not {mask.ndim}-dimensional' ) @classmethod @@ -1519,7 +1519,6 @@ def get_element(self, index): index = self.size + index if index >= self.size or index < 0: raise IndexError( - 'index %i is out of range for SeriesAxis with size %i' - % (original_index, self.size) + f'index {original_index} is out of range for SeriesAxis with size {self.size}' ) return self.start + self.step * index diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index afd994b151..07aa51e2d3 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -51,7 +51,7 @@ def __init__(self, fno): self.direct_io = False def __str__(self): - return 'FileHandle(%d)' % self.fno + return f'FileHandle({self.fno})' class DICOMFS(fuse.Fuse): @@ -85,11 +85,11 @@ def get_paths(self): series_info += f'UID: {series.uid}\n' series_info += f'number: {series.number}\n' series_info += f'description: {series.description}\n' - series_info += 'rows: %d\n' % series.rows - series_info += 'columns: %d\n' % series.columns - series_info += 'bits allocated: %d\n' % series.bits_allocated - series_info += 'bits stored: %d\n' % series.bits_stored - series_info += 'storage instances: %d\n' % len(series.storage_instances) + series_info += f'rows: {series.rows}\n' + series_info += f'columns: {series.columns}\n' + series_info += f'bits allocated: {series.bits_allocated}\n' + series_info += f'bits stored: {series.bits_stored}\n' + series_info += f'storage instances: {len(series.storage_instances)}\n' d[series.number] = { 'INFO': series_info.encode('ascii', 'replace'), f'{series.number}.nii': (series.nifti_size, series.as_nifti), diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 36760f7ebb..55f827e973 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -266,7 +266,7 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): diffs1.append({'CMP': 'incompat'}) if any(diffs1): - diffs['DATA(diff %d:)' % (i + 1)] = diffs1 + diffs[f'DATA(diff {i + 1}:)'] = diffs1 return diffs @@ -293,7 +293,7 @@ def display_diff(files, diff): output += field_width.format('Field/File') for i, f in enumerate(files, 1): - output += '%d:%s' % (i, filename_width.format(os.path.basename(f))) + output += f'{i}:{filename_width.format(os.path.basename(f))}' output += '\n' diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index f79c27f0c5..72fb227687 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -73,7 +73,7 @@ def get_opt_parser(): action='store_true', dest='all_counts', default=False, - help='Output all counts, even if 
number of unique values > %d' % MAX_UNIQUE, + help=f'Output all counts, even if number of unique values > {MAX_UNIQUE}', ), Option( '-z', @@ -117,7 +117,7 @@ def proc_file(f, opts): row += [''] if hasattr(h, 'extensions') and len(h.extensions): - row += ['@l#exts: %d' % len(h.extensions)] + row += [f'@l#exts: {len(h.extensions)}'] else: row += [''] @@ -166,16 +166,16 @@ def proc_file(f, opts): d = d.reshape(-1) if opts.stats: # just # of elements - row += ['@l[%d]' % np.prod(d.shape)] + row += [f'@l[{np.prod(d.shape)}]'] # stats row += [f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' if len(d) else '-'] if opts.counts: items, inv = np.unique(d, return_inverse=True) if len(items) > 1000 and not opts.all_counts: - counts = _err('%d uniques. Use --all-counts' % len(items)) + counts = _err(f'{len(items)} uniques. Use --all-counts') else: freq = np.bincount(inv) - counts = ' '.join('%g:%d' % (i, f) for i, f in zip(items, freq)) + counts = ' '.join(f'{i:g}:{f}' for i, f in zip(items, freq)) row += ['@l' + counts] except OSError as e: verbose(2, f'Failed to obtain stats/counts -- {e}') diff --git a/nibabel/dft.py b/nibabel/dft.py index e63c9c4796..23108895b2 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -162,7 +162,7 @@ def as_nifti(self): for i, si in enumerate(self.storage_instances): if i + 1 != si.instance_number: raise InstanceStackError(self, i, si) - logger.info('reading %d/%d' % (i + 1, len(self.storage_instances))) + logger.info(f'reading {i + 1}/{len(self.storage_instances)}') d = self.storage_instances[i].dicom() data[i, :, :] = d.pixel_array diff --git a/nibabel/ecat.py b/nibabel/ecat.py index c4b55624f9..f634bcd8a6 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -309,14 +309,14 @@ def get_patient_orient(self): """ code = self._structarr['patient_orientation'].item() if code not in self._patient_orient_codes: - raise KeyError('Ecat Orientation CODE %d not recognized' % code) + raise KeyError(f'Ecat Orientation CODE {code} not recognized') return self._patient_orient_codes[code] def get_filetype(self): """Type of ECAT Matrix File from code stored in header""" code = self._structarr['file_type'].item() if code not in self._ft_codes: - raise KeyError('Ecat Filetype CODE %d not recognized' % code) + raise KeyError(f'Ecat Filetype CODE {code} not recognized') return self._ft_codes[code] @classmethod diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index 816f1cdaf6..91ed1f70a1 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -127,7 +127,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True): if slicer < 0: slicer = dim_len + slicer elif check_inds and slicer >= dim_len: - raise ValueError('Integer index %d to large' % slicer) + raise ValueError(f'Integer index {slicer} too large') can_slicers.append(slicer) # Fill out any missing dimensions if n_real < n_dim: diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 74bc05fc31..31745df720 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -427,7 +427,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): for i in range(n_entries): # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] + name = np.fromfile(fobj, f'|S{name_length}', 1)[0] names.append(name) # read RGBT for this entry ctab[i, :4] = np.fromfile(fobj, dt, 4) @@ -471,7 +471,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): ctab = np.zeros((max_index, 5), dt) # orig_tab string length + string length = np.fromfile(fobj, dt, 1)[0] - 
np.fromfile(fobj, '|S%d' % length, 1)[0] # Orig table path + np.fromfile(fobj, f'|S{length}', 1)[0] # Orig table path # number of LUT entries present in the file entries_to_read = np.fromfile(fobj, dt, 1)[0] names = list() @@ -480,7 +480,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): idx = np.fromfile(fobj, dt, 1)[0] # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] + name = np.fromfile(fobj, f'|S{name_length}', 1)[0] names.append(name) # RGBT ctab[idx, :4] = np.fromfile(fobj, dt, 4) @@ -525,7 +525,7 @@ def write(num, dtype=dt): def write_string(s): s = (s if isinstance(s, bytes) else s.encode()) + b'\x00' write(len(s)) - write(s, dtype='|S%d' % len(s)) + write(s, dtype=f'|S{len(s)}') # Generate annotation values for each ctab entry if fill_ctab: diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 6efa67ffa8..0adcb88e2c 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -281,7 +281,7 @@ def set_zooms(self, zooms): zooms = np.asarray(zooms) ndims = self._ndims() if len(zooms) > ndims: - raise HeaderDataError('Expecting %d zoom values' % ndims) + raise HeaderDataError(f'Expecting {ndims} zoom values') if np.any(zooms[:3] <= 0): raise HeaderDataError( f'Spatial (first three) zooms must be positive; got {tuple(zooms[:3])}' diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index c983a14dfd..76fcc4a451 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -522,7 +522,7 @@ def _to_xml_element(self): }, ) for di, dn in enumerate(self.dims): - data_array.attrib['Dim%d' % di] = str(dn) + data_array.attrib[f'Dim{di}'] = str(dn) if self.meta is not None: data_array.append(self.meta._to_xml_element()) diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index ccd608324a..5bcd8c8c32 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -284,8 +284,8 @@ def EndElementHandler(self, name): if name == 'GIFTI': if hasattr(self, 'expected_numDA') and self.expected_numDA != self.img.numDA: warnings.warn( - 'Actual # of data arrays does not match ' - '# expected: %d != %d.' % (self.expected_numDA, self.img.numDA) + 'Actual # of data arrays does not match # expected: ' + f'{self.expected_numDA} != {self.img.numDA}.' 
) # remove last element of the list self.fsm_state.pop() diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index df379e0be8..b98dae7403 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -179,7 +179,7 @@ def get_vector(csa_dict, tag_name, n): if len(items) == 0: return None if len(items) != n: - raise ValueError('Expecting %d vector' % n) + raise ValueError(f'Expecting {n} vector') return np.array(items) diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 5892bb8db2..07362ee47d 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -131,7 +131,7 @@ def slices_to_series(wrappers): break else: # no match in current volume lists volume_lists.append([dw]) - print('We appear to have %d Series' % len(volume_lists)) + print(f'We appear to have {len(volume_lists)} Series') # second pass out_vol_lists = [] for vol_list in volume_lists: @@ -143,7 +143,7 @@ def slices_to_series(wrappers): out_vol_lists += _third_pass(vol_list) continue out_vol_lists.append(vol_list) - print('We have %d volumes after second pass' % len(out_vol_lists)) + print(f'We have {len(out_vol_lists)} volumes after second pass') # final pass check for vol_list in out_vol_lists: zs = [s.slice_indicator for s in vol_list] diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 180f67cca4..b9c78c81bc 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1559,7 +1559,7 @@ def get_intent(self, code_repr='label'): else: raise TypeError('repr can be "label" or "code"') n_params = len(recoder.parameters[code]) if known_intent else 0 - params = (float(hdr['intent_p%d' % (i + 1)]) for i in range(n_params)) + params = (float(hdr[f'intent_p{i}']) for i in range(1, n_params + 1)) name = hdr['intent_name'].item().decode('latin-1') return label, tuple(params), name @@ -1632,8 +1632,8 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): hdr['intent_name'] = name all_params = [0] * 3 all_params[: len(params)] = params[:] - for i, param in enumerate(all_params): - hdr['intent_p%d' % (i + 1)] = param + for i, param in enumerate(all_params, start=1): + hdr[f'intent_p{i}'] = param def get_slice_duration(self): """Get slice duration @@ -1911,7 +1911,7 @@ def _chk_offset(hdr, fix=False): return hdr, rep if magic == hdr.single_magic and offset < hdr.single_vox_offset: rep.problem_level = 40 - rep.problem_msg = 'vox offset %d too low for single file nifti1' % offset + rep.problem_msg = f'vox offset {int(offset)} too low for single file nifti1' if fix: hdr['vox_offset'] = hdr.single_vox_offset rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}' @@ -1943,7 +1943,7 @@ def _chk_xform_code(klass, code_type, hdr, fix): if code in recoder.value_set(): return hdr, rep rep.problem_level = 30 - rep.problem_msg = '%s %d not valid' % (code_type, code) + rep.problem_msg = f'{code_type} {code} not valid' if fix: hdr[code_type] = 0 rep.fix_msg = 'setting to 0' diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 7265bf56f3..12e414def9 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -124,7 +124,7 @@ def ornt_transform(start_ornt, end_ornt): result[start_in_idx, :] = [end_in_idx, flip] break else: - raise ValueError('Unable to find out axis %d in start_ornt' % end_out_idx) + raise ValueError(f'Unable to find out axis {end_out_idx} in start_ornt') return result diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index ce8ee3c6e6..19677c1a7d 100644 --- a/nibabel/spatialimages.py +++ 
b/nibabel/spatialimages.py @@ -267,7 +267,7 @@ def set_zooms(self, zooms: Sequence[float]) -> None: shape = self.get_data_shape() ndim = len(shape) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) + raise HeaderDataError(f'Expecting {ndim} zoom values for ndim {ndim}') if any(z < 0 for z in zooms): raise HeaderDataError('zooms must be positive') self._zooms = zooms diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 5e59bc63b6..8666406168 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -23,7 +23,7 @@ def _as_fname(img): global _counter - fname = 'img%3d.nii' % _counter + fname = f'img{_counter:3d}.nii' _counter = _counter + 1 save(img, fname) return fname From 95cc728dd0c49245373d928f73c263a7ca7f7813 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 20:03:23 +0200 Subject: [PATCH 685/702] =?UTF-8?q?MNT:=20Python=203=20string=20formatting?= =?UTF-8?q?:=20%i=20=E2=86=92=20%d?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Chris Markiewicz --- nibabel/freesurfer/io.py | 2 +- nibabel/gifti/util.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 31745df720..5b3f6a3664 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -465,7 +465,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): dt = _ANNOT_DT # This code works with a file version == 2, nothing else if ctab_version != 2: - raise Exception('Unrecognised .annot file version (%i)', ctab_version) + raise Exception(f'Unrecognised .annot file version ({ctab_version})') # maximum LUT index present in the file max_index = np.fromfile(fobj, dt, 1)[0] ctab = np.zeros((max_index, 5), dt) diff --git a/nibabel/gifti/util.py b/nibabel/gifti/util.py index 9393292013..791f133022 100644 --- a/nibabel/gifti/util.py +++ b/nibabel/gifti/util.py @@ -10,7 +10,7 @@ from ..volumeutils import Recoder # Translate dtype.kind char codes to XML text output strings -KIND2FMT = {'i': '%i', 'u': '%i', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} +KIND2FMT = {'i': '%d', 'u': '%d', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} array_index_order_codes = Recoder( ( From 5daffcce1ed1f6c399d9ed057a32c038a0f87a25 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:49:36 +0200 Subject: [PATCH 686/702] STY: Apply ruff/refurb preview rule FURB145 FURB145 Prefer `copy` method over slicing --- nibabel/tests/test_nifti1.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 8eae0410e9..f0029681b8 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -578,12 +578,12 @@ def test_slice_times(self): with pytest.raises(HeaderDataError): # all None hdr.set_slice_times((None,) * len(times)) - n_mid_times = times[:] + n_mid_times = times.copy() n_mid_times[3] = None with pytest.raises(HeaderDataError): # None in middle hdr.set_slice_times(n_mid_times) - funny_times = times[:] + funny_times = times.copy() funny_times[3] = 0.05 with pytest.raises(HeaderDataError): # can't get single slice duration From 4810cd78bd7d21b9e9f8754bb0a7bd4a86235c49 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 
18:52:29 +0200 Subject: [PATCH 687/702] STY: Apply ruff/refurb preview rule FURB148 FURB148 `enumerate` index is unused, use `for x in y` instead --- nibabel/cifti2/tests/test_cifti2io_header.py | 2 +- nibabel/tests/test_round_trip.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 1c37cfe0e7..ecdf0c69a7 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -72,7 +72,7 @@ def test_read_and_proxies(): @needs_nibabel_data('nitest-cifti2') def test_version(): - for i, dat in enumerate(datafiles): + for dat in datafiles: img = nib.load(dat) assert Version(img.header.version) == Version('2') diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index 07783fe550..6daf960aa4 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -108,15 +108,15 @@ def test_round_trip(): iuint_types = [t for t in iuint_types if t in nifti_supported] f_types = [np.float32, np.float64] # Expanding standard deviations - for i, sd_10 in enumerate(sd_10s): + for sd_10 in sd_10s: sd = 10.0**sd_10 V_in = rng.normal(0, sd, size=(N, 1)) - for j, in_type in enumerate(f_types): - for k, out_type in enumerate(iuint_types): + for in_type in f_types: + for out_type in iuint_types: check_arr(sd_10, V_in, in_type, out_type, scaling_type) # Spread integers across range - for i, sd in enumerate(np.linspace(0.05, 0.5, 5)): - for j, in_type in enumerate(iuint_types): + for sd in np.linspace(0.05, 0.5, 5): + for in_type in iuint_types: info = np.iinfo(in_type) mn, mx = info.min, info.max type_range = mx - mn @@ -124,7 +124,7 @@ def test_round_trip(): # float(sd) because type_range can be type 'long' width = type_range * float(sd) V_in = rng.normal(center, width, size=(N, 1)) - for k, out_type in enumerate(iuint_types): + for out_type in iuint_types: check_arr(sd, V_in, in_type, out_type, scaling_type) From 02b7b0e308b594f730cd139448fbc3e9a0fc4b47 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:55:39 +0200 Subject: [PATCH 688/702] STY: Apply ruff/refurb preview rule FURB157 FURB157 Verbose expression in `Decimal` constructor --- nibabel/nicom/tests/test_dicomwrappers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index db3f667518..aefb35e892 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -991,8 +991,8 @@ def test_scale_data(self): assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data)) # Decimals are OK for frame in frames: - frame.PixelValueTransformationSequence[0].RescaleSlope = Decimal('3') - frame.PixelValueTransformationSequence[0].RescaleIntercept = Decimal('-2') + frame.PixelValueTransformationSequence[0].RescaleSlope = Decimal(3) + frame.PixelValueTransformationSequence[0].RescaleIntercept = Decimal(-2) assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data)) # A per-frame RWV scaling takes precedence over per-frame PixelValueTransformation for frame in frames: From 8c2a501de8c7a1d278634f00320acbfb22355799 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:56:25 +0200 Subject: [PATCH 689/702] STY: Apply ruff/refurb preview rule FURB192 
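The rule, described just below, rewrites patterns like this hedged sketch
(`stack_ids` is an illustrative name, not taken from the diff):

    stack_ids = {4, 1, 3}
    lowest = sorted(stack_ids)[0]  # flagged: sorts every item to take one
    lowest = min(stack_ids)        # preferred: linear scan, no sorted copy
    assert lowest == 1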
FURB192 Prefer `min` over `sorted()` to compute the minimum value in a sequence --- nibabel/nicom/dicomwrappers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 009880e496..64b2b4a96d 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -565,7 +565,7 @@ def applies(self, dcm_wrp) -> bool: warnings.warn( 'A multi-stack file was passed without an explicit filter, just using lowest StackID' ) - self._selected = sorted(stack_ids)[0] + self._selected = min(stack_ids) return True return False From 73bae7e98c4d86492f266adfad38febf41107a4a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:59:16 +0200 Subject: [PATCH 690/702] STY: Apply ruff/flake8-comprehensions preview rule C409 C409 Unnecessary list comprehension passed to `tuple()` (rewrite as a generator) --- nibabel/streamlines/tests/test_array_sequence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index a06b2c45d9..96e66b44c5 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -79,7 +79,7 @@ def test_creating_arraysequence_from_list(self): # List of ndarrays. N = 5 for ndim in range(1, N + 1): - common_shape = tuple([SEQ_DATA['rng'].randint(1, 10) for _ in range(ndim - 1)]) + common_shape = tuple(SEQ_DATA['rng'].randint(1, 10) for _ in range(ndim - 1)) data = generate_data(nb_arrays=5, common_shape=common_shape, rng=SEQ_DATA['rng']) check_arr_seq(ArraySequence(data), data) From b33bcde28337707fcd71dbddf69d8d1bc52a75ca Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 19:00:30 +0200 Subject: [PATCH 691/702] STY: Apply ruff/flake8-comprehensions preview rule C419 C419 Unnecessary list comprehension --- nibabel/orientations.py | 2 +- nibabel/tests/test_volumeutils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 12e414def9..b620fff02b 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -322,7 +322,7 @@ def axcodes2ornt(axcodes, labels=None): [ 2., 1.]]) """ labels = list(zip('LPI', 'RAS')) if labels is None else labels - allowed_labels = sum([list(L) for L in labels], []) + [None] + allowed_labels = sum((list(L) for L in labels), []) + [None] if len(allowed_labels) != len(set(allowed_labels)): raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels): diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 9d321f07e4..1bd44cbd0a 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -607,7 +607,7 @@ def test_a2f_nanpos(): def test_a2f_bad_scaling(): # Test that pathological scalers raise an error - NUMERICAL_TYPES = sum([sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []) + NUMERICAL_TYPES = sum((sctypes[key] for key in ['int', 'uint', 'float', 'complex']), []) for in_type, out_type, slope, inter in itertools.product( NUMERICAL_TYPES, NUMERICAL_TYPES, From b8487cec305898d353c0fe10a814bc3bb87d6f80 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 2 Oct 2024 15:04:06 +0200 Subject: [PATCH 
692/702] MNT: Fix misspellings found by codespell --- nibabel/tests/test_casting.py | 2 +- nibabel/tests/test_proxy_api.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index d4cf81515a..c6c1ddb661 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -161,7 +161,7 @@ def test_floor_log2(): def test_able_int_type(): - # The integer type cabable of containing values + # The integer type capable of containing values for vals, exp_out in ( ([0, 1], np.uint8), ([0, 255], np.uint8), diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 421bc5bf47..ba0f784d59 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -25,7 +25,7 @@ * if you pass a header into the __init__, then modifying the original header will not affect the result of the array return. -These last are to allow the proxy to be re-used with different images. +These last are to allow the proxy to be reused with different images. """ import unittest From ec15839f8141745600e40ce1b737ba768d33d2fe Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 2 Oct 2024 19:05:00 +0200 Subject: [PATCH 693/702] MNT: better way to normalize sequences to lists and flatten Co-authored-by: Chris Markiewicz --- nibabel/orientations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/orientations.py b/nibabel/orientations.py index b620fff02b..f1cdd228be 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -322,7 +322,7 @@ def axcodes2ornt(axcodes, labels=None): [ 2., 1.]]) """ labels = list(zip('LPI', 'RAS')) if labels is None else labels - allowed_labels = sum((list(L) for L in labels), []) + [None] + allowed_labels = sum(map(list, labels), [None]) if len(allowed_labels) != len(set(allowed_labels)): raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels): From 7a733f6f54c9f382f28e468c1fab8d414b8fdae6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 8 Oct 2024 10:36:30 -0400 Subject: [PATCH 694/702] DOC: Update changelog --- Changelog | 61 ++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 51 insertions(+), 10 deletions(-) diff --git a/Changelog b/Changelog index 24e89095f3..f72a6a8874 100644 --- a/Changelog +++ b/Changelog @@ -25,31 +25,72 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM). References like "pr/298" refer to github pull request numbers. -Upcoming release (To be determined) -=================================== +5.3.0 (Tuesday 8 October 2024) +============================== + +This release primarily adds support for Python 3.13 and Numpy 2.0. + +NiBabel 6.0 will drop support for Numpy 1.x. New features ------------ +* Update NIfTI extension protocol to include ``.content : bytes``, ``.text : str`` and ``.json : dict`` + properties for accessing extension contents. Exceptions will be raised on ``.text`` and ``.json`` if + conversion fails. 
(pr/1336) (CM) Enhancements ------------ - * Ability to read data from many multiframe DICOM files that previously generated errors +* Ability to read data from many multiframe DICOM files that previously generated errors (pr/1340) + (Brendan Moloney, reviewed by CM) +* ``nib-nifti-dx`` now supports NIfTI-2 files with a ``--nifti2`` flag (pr/1323) (CM) +* Update :mod:`nibabel.streamlines.tractogram` to support ragged arrays. (pr/1291) + (Serge Koudoro, reviewed by CM) +* Filter numpy ``UserWarning`` on ``np.finfo(np.longdouble)``. This can occur on + Windows systems, but it's done in the context of checking for the problem that + is being warned against, so there's no need to be noisy. (pr/1310) + (Joshua Newton, reviewed by CM) +* Improve error message for for dicomwrapper errors in shape calculation (pr/1302) + (YOH, reviewed by CM) +* Support "flat" ASCII-encoded GIFTI DataArrays (pr/1298) (PM, reviewed by CM) Bug fixes --------- - * Fixed multiframe DICOM issue where data could be flipped along slice dimension relative to the - affine - * Fixed multiframe DICOM issue where ``image_position`` and the translation component in the - ``affine`` could be incorrect - -Documentation -------------- +* Fix location initialization/update in OrthoSlicer3D for permuted axes (pr/1319, pr/1350) + (Guillaume Becq, reviewed by CM) +* Fix DICOM scaling, making frame filtering explicit (pr/1342) (Brendan Moloney, reviewed by CM) +* Fixed multiframe DICOM issue where data could be flipped along slice dimension relative to the + affine (pr/1340) (Brendan Moloney, reviewed by CM) +* Fixed multiframe DICOM issue where ``image_position`` and the translation component in the + ``affine`` could be incorrect (pr/1340) (Brendan Moloney, reviewed by CM) Maintenance ----------- +* Numpy 2.0 compatibility and addressing deprecations in numpy API + (pr/1304, pr/1330, pr/1331, pr/1334, pr/1337) (Jon Haitz Legarreta Gorroño, CM) +* Python 3.13 compatibility (pr/1315) (Sandro from the Fedora Project, reviewed by CM) +* Testing on Python 3.13 with free-threading (pr/1339) (CM) +* Testing on ARM64 Mac OS runners (pr/1320) (CM) +* Proactively address deprecations in coming Python versions (pr/1329, pr/1332, pr/1333) + (Jon Haitz Legarreta Gorroño, reviewed by CM) +* Replace nose-era ``setup()`` and ``teardown()`` functions with pytest equivalents + (pr/1325) (Sandro from the Fedora Project, reviewed by Étienne Mollier and CM) +* Transitioned from blue/isort/flake8 to `ruff `__. (pr/1289) + (Dimitri Papadopoulos, reviewed by CM) +* Vetted and added various rules to the ruff configuration for auto-formatting and style + guide enforcement. (pr/1321, pr/1351, pr/1352, pr/1353, pr/1354, pr/1355, pr/1357, pr/1358, + pr/1359, pr/1360, pr/1361, pr/1362, pr/1363, pr/1364, pr/1368, pr/1369) + (Dimitri Papadopoulos, reviewed by CM) +* Fixing typos when found. (pr/1313, pr/1370) (MB, Dimitri Papadopoulos) +* Applied Repo-Review suggestions (Dimitri Papadopoulos, reviewed by CM) API changes and deprecations ---------------------------- +* Raise :class:`~nibabel.spatialimages.HeaderDataError` from + :func:`~nibabel.nifti1.Nifti1Header.set_qform` if the affine fails to decompose. + This would previously result in :class:`numpy.linalg.LinAlgError`. (pr/1227) (CM) +* The :func:`nibabel.onetime.auto_attr` module can be replaced by :func:`functools.cached_property` + in all supported versions of Python. This alias may be removed in future versions. (pr/1341) (CM) +* Removed the deprecated ``nisext`` (setuptools extensions) package. 
(pr/1290) (CM, reviewed by MB) 5.2.1 (Monday 26 February 2024) From 607b5cad30119defc3e005c8f25cfc2bb2f505cb Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 8 Oct 2024 10:38:46 -0400 Subject: [PATCH 695/702] DOC: Update Zenodo contributors --- .zenodo.json | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 553aba0548..250611d54d 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -25,6 +25,11 @@ "name": "Cipollini, Ben", "orcid": "0000-0002-7782-0790" }, + { + "affiliation": "CEA", + "name": "Papadopoulos Orfanos, Dimitri", + "orcid": "0000-0002-1242-8990" + }, { "name": "McCarthy, Paul" }, @@ -78,13 +83,11 @@ "orcid": "0000-0001-7159-1387" }, { - "name": "Wang, Hao-Ting", - "orcid": "0000-0003-4078-2038" + "name": "Moloney, Brendan" }, { - "affiliation": "CEA", - "name": "Papadopoulos Orfanos, Dimitri", - "orcid": "0000-0002-1242-8990" + "name": "Wang, Hao-Ting", + "orcid": "0000-0003-4078-2038" }, { "affiliation": "Harvard University - Psychology", @@ -123,9 +126,6 @@ { "name": "S\u00f3lon, Anibal" }, - { - "name": "Moloney, Brendan" - }, { "name": "Morency, F\u00e9lix C." }, @@ -177,6 +177,11 @@ { "name": "Van, Andrew" }, + { + "affiliation": "Brigham and Women's Hospital, Mass General Brigham/Harvard Medical School", + "name": "Legarreta, Jon Haitz", + "orcid": "0000-0002-9661-1396" + }, { "affiliation": "Google", "name": "Gorgolewski, Krzysztof J.", @@ -203,6 +208,9 @@ { "name": "Baker, Eric M." }, + { + "name": "Koudoro, Serge" + }, { "name": "Hayashi, Soichi" }, @@ -220,14 +228,14 @@ "name": "Esteban, Oscar", "orcid": "0000-0001-8435-6191" }, - { - "name": "Koudoro, Serge" - }, { "affiliation": "University College London", "name": "P\u00e9rez-Garc\u00eda, Fernando", "orcid": "0000-0001-9090-3024" }, + { + "name": "Becq, Guillaume" + }, { "name": "Dock\u00e8s, J\u00e9r\u00f4me" }, @@ -270,9 +278,9 @@ "orcid": "0000-0003-1076-5122" }, { - "affiliation": "Brigham and Women's Hospital, Mass General Brigham/Harvard Medical School", - "name": "Legarreta, Jon Haitz", - "orcid": "0000-0002-9661-1396" + "affiliation": "Polytechnique Montr\u00e9al, Montr\u00e9al, CA", + "name": "Newton, Joshua", + "orcid": "0009-0005-6963-3812" }, { "name": "Hahn, Kevin S." @@ -285,6 +293,9 @@ { "name": "Hinds, Oliver P." }, + { + "name": "Sandro" + }, { "name": "Fauber, Bennet" }, @@ -391,11 +402,6 @@ }, { "name": "freec84" - }, - { - "affiliation": "Polytechnique Montréal, Montréal, CA", - "name": "Newton, Joshua", - "orcid": "0009-0005-6963-3812" } ], "keywords": [ From 9bdbc42217321d78578c809b83b38f18102dea93 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 8 Oct 2024 10:44:29 -0400 Subject: [PATCH 696/702] DOC: Update mailmap and contributor list --- .mailmap | 1 + doc/source/index.rst | 3 +++ 2 files changed, 4 insertions(+) diff --git a/.mailmap b/.mailmap index 7b5dfa0d43..43932c865b 100644 --- a/.mailmap +++ b/.mailmap @@ -75,6 +75,7 @@ Oliver P. 
Hinds Or Duek Oscar Esteban Paul McCarthy +Paul McCarthy Reinder Vos de Wael Roberto Guidotti Roberto Guidotti diff --git a/doc/source/index.rst b/doc/source/index.rst index 72c731d25f..677e81b331 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -130,6 +130,9 @@ contributed code and discussion (in rough order of appearance): * Reinder Vos de Wael * Peter Suter * Blake Dewey +* Guillaume Becq +* Joshua Newton +* Sandro from the Fedora Project License reprise =============== From 5a32a60918be2f73f8345376c30495028bc59046 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 8 Oct 2024 10:45:37 -0400 Subject: [PATCH 697/702] DOC: Remove end year from copyright --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 4255ff1841..9811651223 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -102,7 +102,7 @@ # General information about the project. project = 'NiBabel' -copyright = f"2006-2023, {authors['name']} <{authors['email']}>" +copyright = f"2006, {authors['name']} <{authors['email']}>" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the From fcc2957c2a71a645508c38aeada94620de100ce3 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 8 Oct 2024 10:47:51 -0400 Subject: [PATCH 698/702] MNT: Update support matrix for Python and numpy --- .github/workflows/test.yml | 10 ++++----- doc/source/installation.rst | 20 +++++++++--------- pyproject.toml | 12 +++++------ tox.ini | 41 ++++++++++++++++++------------------- 4 files changed, 41 insertions(+), 42 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9e5ddd5162..a741a40714 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -113,17 +113,17 @@ jobs: fail-fast: false matrix: os: ['ubuntu-latest', 'windows-latest', 'macos-13', 'macos-latest'] - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.9", "3.10", "3.11", "3.12"] architecture: ['x64', 'x86', 'arm64'] dependencies: ['full', 'pre'] include: # Basic dependencies only - os: ubuntu-latest - python-version: 3.8 + python-version: 3.9 dependencies: 'none' # Absolute minimum dependencies - os: ubuntu-latest - python-version: 3.8 + python-version: 3.9 dependencies: 'min' # NoGIL - os: ubuntu-latest @@ -153,10 +153,10 @@ jobs: - os: macos-13 dependencies: pre # Drop pre tests for SPEC-0-unsupported Python versions - - python-version: '3.8' - dependencies: pre - python-version: '3.9' dependencies: pre + - python-version: '3.10' + dependencies: pre env: DEPENDS: ${{ matrix.dependencies }} diff --git a/doc/source/installation.rst b/doc/source/installation.rst index 4f747e7feb..983968c50f 100644 --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -81,16 +81,16 @@ is for you. Requirements ------------ -.. check these against pyproject.toml - -* Python_ 3.8 or greater -* NumPy_ 1.20 or greater -* Packaging_ 17.0 or greater -* importlib-resources_ 1.3 or greater (or Python 3.9+) -* SciPy_ (optional, for full SPM-ANALYZE support) -* h5py_ (optional, for MINC2 support) -* PyDICOM_ 1.0.0 or greater (optional, for DICOM support) -* `Python Imaging Library`_ (optional, for PNG conversion in DICOMFS) +.. 
check these against pyproject.toml / tox.ini + +* Python_ 3.9 or greater +* NumPy_ 1.22 or greater +* Packaging_ 20.0 or greater +* importlib-resources_ 5.12 or greater (or Python 3.12+) +* SciPy_ 1.8 or greater (optional, for full SPM-ANALYZE support) +* h5py_ 3.5 or greater (optional, for MINC2 support) +* PyDICOM_ 2.3.0 or greater (optional, for DICOM support) +* `Python Imaging Library`_ 8.4 or greater (optional, for PNG conversion in DICOMFS) * pytest_ (optional, to run the tests) * sphinx_ (optional, to build the documentation) diff --git a/pyproject.toml b/pyproject.toml index 18883b90ec..b62c0048af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,10 +9,10 @@ authors = [{ name = "NiBabel developers", email = "neuroimaging@python.org" }] maintainers = [{ name = "Christopher Markiewicz" }] readme = "README.rst" license = { text = "MIT License" } -requires-python = ">=3.8" +requires-python = ">=3.9" dependencies = [ - "numpy >=1.20", - "packaging >=17", + "numpy >=1.22", + "packaging >=20", "importlib_resources >=5.12; python_version < '3.12'", "typing_extensions >=4.6; python_version < '3.13'", ] @@ -23,11 +23,11 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Topic :: Scientific/Engineering", ] # Version from setuptools_scm @@ -53,7 +53,7 @@ parrec2nii = "nibabel.cmdline.parrec2nii:main" [project.optional-dependencies] all = ["nibabel[dicomfs,minc2,spm,zstd]"] # Features -dicom = ["pydicom >=1.0.0"] +dicom = ["pydicom >=2.3"] dicomfs = ["nibabel[dicom]", "pillow"] minc2 = ["h5py"] spm = ["scipy"] @@ -62,7 +62,7 @@ zstd = ["pyzstd >= 0.14.3"] # tox should use these with extras instead of duplicating doc = [ "sphinx", - "matplotlib>=1.5.3", + "matplotlib>=3.5", "numpydoc", "texext", "tomli; python_version < '3.11'", diff --git a/tox.ini b/tox.ini index 0e0f81a7ae..82c13debc6 100644 --- a/tox.ini +++ b/tox.ini @@ -7,14 +7,14 @@ requires = tox>=4 envlist = # No preinstallations - py3{8,9,10,11,12}-none + py3{9,10,11,12,13}-none # Minimum Python - py38-{min,full} + py39-{min,full} # x86 support range py3{9,10,11}-{full,pre}-{x86,x64} py3{9,10,11}-pre-{x86,x64} # x64-only range - py312-{full,pre}-x64 + py3{12,13}-{full,pre}-x64 # Special environment for numpy 2.0-dev testing py313-dev-x64 install @@ -26,7 +26,6 @@ skip_missing_interpreters = true # Configuration that allows us to split tests across GitHub runners effectively [gh-actions] python = - 3.8: py38 3.9: py39 3.10: py310 3.11: py311 @@ -76,35 +75,35 @@ set_env = extras = test deps = # General minimum dependencies: pin based on API usage - min: packaging ==17 + # matplotlib 3.5 requires packaging 20 + min: packaging ==20 min: importlib_resources ==5.12; python_version < '3.12' min: typing_extensions ==4.6; python_version < '3.13' # NEP29/SPEC0 + 1yr: Test on minor release series within the last 3 years # We're extending this to all optional dependencies # This only affects the range that we test on; numpy is the only non-optional # dependency, and will be the only one to affect pip environment resolution. 
- min: numpy ==1.20 - min: h5py ==2.10 - min: indexed_gzip ==1.4 - min: matplotlib ==3.4 - min: pillow ==8.1 - min: pydicom ==2.1 - min: pyzstd ==0.14.3 - min: scipy ==1.6 + min: numpy ==1.22 + min: h5py ==3.5 + min: indexed_gzip ==1.6 + min: matplotlib ==3.5 + min: pillow ==8.4 + min: pydicom ==2.3 + min: pyzstd ==0.15.2 + min: scipy ==1.8 # Numpy 2.0 is a major breaking release; we cannot put much effort into # supporting until it's at least RC stable - pre: numpy <2.0.dev0 dev: numpy >=2.1.dev0 # Scipy stopped producing win32 wheels at py310 - py3{8,9}-full-x86,x64,arm64: scipy >=1.6 + py39-full-x86,x64,arm64: scipy >=1.8 # Matplotlib depends on scipy, so cannot be built for py310 on x86 - py3{8,9}-full-x86,x64,arm64: matplotlib >=3.4 + py39-full-x86,x64,arm64: matplotlib >=3.5 # h5py stopped producing win32 wheels at py39 - py38-full-x86,{full,pre}-{x64,arm64}: h5py >=2.10 - full,pre,dev: pillow >=8.1 - full,pre: indexed_gzip >=1.4 - full,pre,dev: pyzstd >=0.14.3 - full,pre: pydicom >=2.1 + {full,pre}-{x64,arm64}: h5py >=3.5 + full,pre,dev: pillow >=8.4 + full,pre: indexed_gzip >=1.6 + full,pre,dev: pyzstd >=0.15.2 + full,pre: pydicom >=2.3 dev: pydicom @ git+https://github.com/pydicom/pydicom.git@main commands = From 1d93526980d3b9107c49d2788bc04da3cfaf89ce Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 8 Oct 2024 11:13:43 -0400 Subject: [PATCH 699/702] MNT: Remove workarounds used for Python 3.8 support --- nibabel/__init__.py | 5 +---- nibabel/conftest.py | 7 ++----- nibabel/filebasedimages.py | 2 +- nibabel/nicom/ascconv.py | 5 +---- nibabel/nifti1.py | 2 +- nibabel/spatialimages.py | 6 +----- nibabel/testing/__init__.py | 6 +----- nibabel/testing/np_features.py | 4 ++-- nibabel/tests/test_arrayproxy.py | 8 +++++--- nibabel/tests/test_init.py | 6 +----- nibabel/tests/test_openers.py | 5 +++-- nibabel/volumeutils.py | 2 +- 12 files changed, 20 insertions(+), 38 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index aa90540b8f..c389c603fc 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -170,10 +170,7 @@ def bench(label=None, verbose=1, extra_argv=None): code : ExitCode Returns the result of running the tests as a ``pytest.ExitCode`` enum """ - try: - from importlib.resources import as_file, files - except ImportError: - from importlib_resources import as_file, files + from importlib.resources import as_file, files args = [] if extra_argv is not None: diff --git a/nibabel/conftest.py b/nibabel/conftest.py index b16a832f28..1d7389e867 100644 --- a/nibabel/conftest.py +++ b/nibabel/conftest.py @@ -10,10 +10,7 @@ @pytest.fixture(scope='session', autouse=True) def legacy_printoptions(): - from packaging.version import Version - - if Version(np.__version__) >= Version('1.22'): - np.set_printoptions(legacy='1.21') + np.set_printoptions(legacy='1.21') @pytest.fixture @@ -24,7 +21,7 @@ def max_digits(): orig_max_str_digits = sys.get_int_max_str_digits() yield sys.set_int_max_str_digits sys.set_int_max_str_digits(orig_max_str_digits) - except AttributeError: # pragma: no cover + except AttributeError: # PY310 # pragma: no cover # Nothing to do for versions of Python that lack these methods # They were added as DoS protection in Python 3.11 and backported to # some other versions. 
diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py
index c12644a2bd..086e31f123 100644
--- a/nibabel/filebasedimages.py
+++ b/nibabel/filebasedimages.py
@@ -23,7 +23,7 @@
 if ty.TYPE_CHECKING:
     from .filename_parser import ExtensionSpec, FileSpec
 
-FileSniff = ty.Tuple[bytes, str]
+FileSniff = tuple[bytes, str]
 
 ImgT = ty.TypeVar('ImgT', bound='FileBasedImage')
 HdrT = ty.TypeVar('HdrT', bound='FileBasedHeader')
diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py
index 6d72436039..2eca5a1579 100644
--- a/nibabel/nicom/ascconv.py
+++ b/nibabel/nicom/ascconv.py
@@ -90,10 +90,7 @@ def assign2atoms(assign_ast, default_class=int):
             target = target.value
             prev_target_type = OrderedDict
         elif isinstance(target, ast.Subscript):
-            if isinstance(target.slice, ast.Constant):  # PY39
-                index = target.slice.value
-            else:  # PY38
-                index = target.slice.value.n
+            index = target.slice.value
             atoms.append(Atom(target, prev_target_type, index))
             target = target.value
             prev_target_type = list
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index b9c78c81bc..f0bd91fc48 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -671,7 +671,7 @@ def _mangle(self, dataset: DicomDataset) -> bytes:
         (38, 'eval', NiftiExtension),
         (40, 'matlab', NiftiExtension),
         (42, 'quantiphyse', NiftiExtension),
-        (44, 'mrs', NiftiExtension[ty.Dict[str, ty.Any]]),
+        (44, 'mrs', NiftiExtension[dict[str, ty.Any]]),
     ),
     fields=('code', 'label', 'handler'),
 )
diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py
index 19677c1a7d..a8e8993597 100644
--- a/nibabel/spatialimages.py
+++ b/nibabel/spatialimages.py
@@ -133,6 +133,7 @@
 from __future__ import annotations
 
 import typing as ty
+from functools import cache
 from typing import Literal
 
 import numpy as np
@@ -145,11 +146,6 @@
 from .viewers import OrthoSlicer3D
 from .volumeutils import shape_zoom_affine
 
-try:
-    from functools import cache
-except ImportError:  # PY38
-    from functools import lru_cache as cache
-
 if ty.TYPE_CHECKING:
     import io
     from collections.abc import Sequence
diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py
index be111747b2..b42baf2955 100644
--- a/nibabel/testing/__init__.py
+++ b/nibabel/testing/__init__.py
@@ -17,6 +17,7 @@
 import unittest
 import warnings
 from contextlib import nullcontext
+from importlib.resources import as_file, files
 from itertools import zip_longest
 
 import numpy as np
@@ -29,11 +30,6 @@
 if ty.TYPE_CHECKING:
     from importlib.resources.abc import Traversable
 
-try:
-    from importlib.resources import as_file, files
-except ImportError:  # PY38
-    from importlib_resources import as_file, files
-
 
 def get_test_data(
     subdir: ty.Literal['gifti', 'nicom', 'externals'] | None = None,
diff --git a/nibabel/testing/np_features.py b/nibabel/testing/np_features.py
index 226df64845..dd21aac2c0 100644
--- a/nibabel/testing/np_features.py
+++ b/nibabel/testing/np_features.py
@@ -1,11 +1,11 @@
 """Look for changes in numpy behavior over versions"""
 
-from functools import lru_cache
+from functools import cache
 
 import numpy as np
 
 
-@lru_cache(maxsize=None)
+@cache
 def memmap_after_ufunc() -> bool:
     """Return True if ufuncs on memmap arrays always return memmap arrays
 
diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py
index a79f63bc72..65b9131905 100644
--- a/nibabel/tests/test_arrayproxy.py
+++ b/nibabel/tests/test_arrayproxy.py
@@ -482,9 +482,11 @@ def test_keep_file_open_true_false_invalid():
 
     for test in tests:
         filetype, kfo, have_igzip, exp_persist, exp_kfo = test
-        with InTemporaryDirectory(), mock.patch(
-            'nibabel.openers.ImageOpener', CountingImageOpener
-        ), patch_indexed_gzip(have_igzip):
+        with (
+            InTemporaryDirectory(),
+            mock.patch('nibabel.openers.ImageOpener', CountingImageOpener),
+            patch_indexed_gzip(have_igzip),
+        ):
             fname = f'testdata.{filetype}'
             # create the test data file
             if filetype == 'gz':
diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py
index d54f55053b..d339c4e26b 100644
--- a/nibabel/tests/test_init.py
+++ b/nibabel/tests/test_init.py
@@ -1,14 +1,10 @@
 import pathlib
 import unittest
+from importlib.resources import files
 from unittest import mock
 
 import pytest
 
-try:
-    from importlib.resources import files
-except ImportError:
-    from importlib_resources import files
-
 import nibabel as nib
 
diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py
index 0b58794331..05d0e04cd0 100644
--- a/nibabel/tests/test_openers.py
+++ b/nibabel/tests/test_openers.py
@@ -121,8 +121,9 @@ def patch_indexed_gzip(state):
         values = (True, MockIndexedGzipFile)
     else:
         values = (False, GzipFile)
-    with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), mock.patch(
-        'nibabel.openers.IndexedGzipFile', values[1], create=True
+    with (
+        mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]),
+        mock.patch('nibabel.openers.IndexedGzipFile', values[1], create=True),
     ):
         yield
 
diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py
index 6e43f79186..d0ebb46a7b 100644
--- a/nibabel/volumeutils.py
+++ b/nibabel/volumeutils.py
@@ -235,7 +235,7 @@ def value_set(self, name: str | None = None) -> OrderedSet:
 endian_codes = Recoder(_endian_codes)
 
 
-class DtypeMapper(ty.Dict[ty.Hashable, ty.Hashable]):
+class DtypeMapper(dict[ty.Hashable, ty.Hashable]):
     """Specialized mapper for numpy dtypes
 
     We pass this mapper into the Recoder class to deal with numpy dtype

From 48dcb4702f8cea1f21fe1fe7a38ad80132715073 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 8 Oct 2024 11:14:09 -0400
Subject: [PATCH 700/702] STY: ruff check --fix

---
 nibabel/tests/test_tripwire.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py
index d7daefe0b1..4bf91923f2 100644
--- a/nibabel/tests/test_tripwire.py
+++ b/nibabel/tests/test_tripwire.py
@@ -16,5 +16,5 @@ def test_tripwire():
     with pytest.raises(TripWireError):
         silly_module_name.do_silly_thing
     # Check AttributeError can be checked too
-    with pytest.raises(AttributeError) as err:
+    with pytest.raises(AttributeError):
         silly_module_name.__wrapped__

From 249986b169f7845c6ce8e19ac36546aef2763fd1 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Tue, 8 Oct 2024 13:54:03 -0400
Subject: [PATCH 701/702] MNT: Update release notes translator

---
 tools/markdown_release_notes.py | 56 ++++++++++++++++++++++++++++++---
 1 file changed, 51 insertions(+), 5 deletions(-)

diff --git a/tools/markdown_release_notes.py b/tools/markdown_release_notes.py
index 73bdbf7752..cdae474f51 100644
--- a/tools/markdown_release_notes.py
+++ b/tools/markdown_release_notes.py
@@ -1,14 +1,53 @@
 #!/usr/bin/env python
 import re
 import sys
+from collections import defaultdict
+from functools import cache
+from operator import call
 from pathlib import Path
 
+from sphinx.ext.intersphinx import fetch_inventory
+
 CHANGELOG = Path(__file__).parent.parent / 'Changelog'
 
 # Match release lines like "5.2.0 (Monday 11 December 2023)"
 RELEASE_REGEX = re.compile(r"""((?:\d+)\.(?:\d+)\.(?:\d+)) \(\w+ \d{1,2} \w+ \d{4}\)$""")
 
+
+class MockConfig:
+    intersphinx_timeout: int | None = None
+    tls_verify = False
+    tls_cacerts: str | dict[str, str] | None = None
+    user_agent: str = ''
+
+
+@call
+class MockApp:
+    srcdir = ''
+    config = MockConfig()
+
+
+fetch_inv = cache(fetch_inventory)
+
+
+def get_intersphinx(obj):
+    module = obj.split('.', 1)[0]
+
+    registry = defaultdict(lambda: 'https://docs.python.org/3')
+    registry.update(
+        numpy='https://numpy.org/doc/stable',
+    )
+
+    base_url = registry[module]
+
+    inventory = fetch_inv(MockApp, '', f'{base_url}/objects.inv')
+    # Check py: first, then whatever
+    for objclass in sorted(inventory, key=lambda x: not x.startswith('py:')):
+        if obj in inventory[objclass]:
+            return f'{base_url}/{inventory[objclass][obj][2]}'
+    raise ValueError(f"Couldn't lookup {obj}")
+
+
 def main():
     version = sys.argv[1]
     output = sys.argv[2]
@@ -46,7 +85,7 @@ def main():
     release_notes = re.sub(r'\n +', ' ', release_notes)
 
     # Replace pr/ with # for GitHub
-    release_notes = re.sub(r'\(pr/(\d+)\)', r'(#\1)', release_notes)
+    release_notes = re.sub(r'pr/(\d+)', r'#\1', release_notes)
 
     # Replace :mod:`package.X` with [package.X](...)
     release_notes = re.sub(
@@ -76,6 +115,14 @@ def main():
         r'[\3](https://nipy.org/nibabel/reference/\1.html#\1.\2.\3)',
         release_notes,
     )
+    # Replace :<class>:`<obj>` with intersphinx lookup
+    for ref in re.findall(r'(:[^:]*:`~?\w[\w.]+\w`)', release_notes):
+        objclass, tilde, module, obj = re.match(r':([^:]*):`(~?)([\w.]+)\.(\w+)`', ref).groups()
+        url = get_intersphinx(f'{module}.{obj}')
+        mdlink = f'[{"" if tilde else module + "."}{obj}]({url})'
+        release_notes = release_notes.replace(ref, mdlink)
+    # Replace RST links with Markdown links
+    release_notes = re.sub(r'`([^<`]*) <([^>]*)>`_+', r'[\1](\2)', release_notes)
 
     def python_doc(match):
         module = match.group(1)
@@ -84,10 +131,9 @@ def python_doc(match):
 
     release_notes = re.sub(r':meth:`~([\w.]+)\.(\w+)`', python_doc, release_notes)
 
-    output.write('## Release notes\n\n')
-    output.write(release_notes)
-
-    output.close()
+    with output:
+        output.write('## Release notes\n\n')
+        output.write(release_notes)
 
 
 if __name__ == '__main__':

From 22980e36de9ec821128765109741a619a94e7766 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Thu, 10 Oct 2024 09:03:16 -0400
Subject: [PATCH 702/702] TEST: Do not depend on test order in
 test_api_validators

---
 nibabel/tests/test_api_validators.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py
index a4e787465a..2388089f2c 100644
--- a/nibabel/tests/test_api_validators.py
+++ b/nibabel/tests/test_api_validators.py
@@ -99,18 +99,18 @@ class TestRunAllTests(ValidateAPI):
 
     We check this in the module teardown function
     """
 
-    run_tests = []
+    run_tests = set()
 
     def obj_params(self):
         yield 1, 2
 
     def validate_first(self, obj, param):
-        self.run_tests.append('first')
+        self.run_tests.add('first')
 
     def validate_second(self, obj, param):
-        self.run_tests.append('second')
+        self.run_tests.add('second')
 
     @classmethod
     def teardown_class(cls):
         # Check that both validate_xxx tests got run
-        assert cls.run_tests == ['first', 'second']
+        assert cls.run_tests == {'first', 'second'}
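
A footnote on the last patch: pytest need not execute the two validate_*
methods in definition order (for example under randomization or distribution
plugins), so asserting against the ordered list ['first', 'second'] was
fragile, while set equality ignores execution order entirely. Below is a
minimal self-contained sketch of the idea; the names are hypothetical and it
is not part of the nibabel test suite:

    # Order-independent tracking of which checks ran.
    run_checks = set()

    def validate_first():
        run_checks.add('first')

    def validate_second():
        run_checks.add('second')

    # Whichever order the runner picks, the final comparison is stable,
    # and duplicate invocations collapse into a single set member.
    for check in (validate_second, validate_first):
        check()

    assert run_checks == {'first', 'second'}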