Fix coding style and PEP8 violations #86

Status: Open · wants to merge 3 commits into base: main
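
The reformatting pattern throughout this diff (double quotes, two spaces before inline comments, long calls wrapped with trailing commas) is consistent with black, which the review comments below also reference. A plausible invocation, with the 120-character line length as an assumption rather than a confirmed project setting:

    black --line-length 120 sigmf/ tests/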
Changes from all commits
4 changes: 2 additions & 2 deletions sigmf/apps/convert_wav.py
@@ -71,8 +71,8 @@ def main():
     parser = argparse.ArgumentParser(description="Convert .wav to .sigmf container.")
     parser.add_argument("input", type=str, help="Wavfile path")
     parser.add_argument("--author", type=str, default=None, help=f"set {SigMFFile.AUTHOR_KEY} metadata")
-    parser.add_argument('-v', '--verbose', action='count', default=0)
-    parser.add_argument('--version', action='version', version=f'%(prog)s v{toolversion}')
+    parser.add_argument("-v", "--verbose", action="count", default=0)
+    parser.add_argument("--version", action="version", version=f"%(prog)s v{toolversion}")
     args = parser.parse_args()

     level_lut = {
6 changes: 5 additions & 1 deletion sigmf/error.py
@@ -8,21 +8,25 @@


 class SigMFError(Exception):
-    """ SigMF base exception."""
+    """SigMF base exception."""
+
     pass


 class SigMFValidationError(SigMFError):
     """Exceptions related to validating SigMF metadata."""
+
     pass


 class SigMFAccessError(SigMFError):
     """Exceptions related to accessing the contents of SigMF metadata, notably
     when expected fields are missing or accessing out of bounds captures."""
+
     pass


 class SigMFFileError(SigMFError):
     """Exceptions related to reading or writing SigMF files or archives."""
+
     pass
17 changes: 7 additions & 10 deletions sigmf/schema.py
@@ -4,27 +4,24 @@
 #
 # SPDX-License-Identifier: LGPL-3.0-or-later

-'''Schema IO'''
+"""Schema IO"""

 import json
 import os

 from . import utils

-SCHEMA_META = 'schema-meta.json'
-SCHEMA_COLLECTION = 'schema-collection.json'
+SCHEMA_META = "schema-meta.json"
+SCHEMA_COLLECTION = "schema-collection.json"


 def get_schema(version=None, schema_file=SCHEMA_META):
-    '''
+    """
     Load JSON Schema for either a `sigmf-meta` or `sigmf-collection`.

     TODO: In the future load specific schema versions.
-    '''
-    schema_path = os.path.join(
-        utils.get_schema_path(os.path.dirname(utils.__file__)),
-        schema_file
-    )
-    with open(schema_path, 'rb') as handle:
+    """
+    schema_path = os.path.join(utils.get_schema_path(os.path.dirname(utils.__file__)), schema_file)
+    with open(schema_path, "rb") as handle:
         schema = json.load(handle)
     return schema
2 changes: 1 addition & 1 deletion sigmf/sigmf_hash.py
@@ -4,7 +4,7 @@
 #
 # SPDX-License-Identifier: LGPL-3.0-or-later

-'''Hashing Functions'''
+"""Hashing Functions"""

 import hashlib
 import os
107 changes: 86 additions & 21 deletions sigmf/sigmffile.py
@@ -148,16 +148,50 @@ class SigMFFile(SigMFMetafile):
     CAPTURE_KEY = "captures"
     ANNOTATION_KEY = "annotations"
     VALID_GLOBAL_KEYS = [
-        AUTHOR_KEY, COLLECTION_KEY, DATASET_KEY, DATATYPE_KEY, DATA_DOI_KEY, DESCRIPTION_KEY, EXTENSIONS_KEY,
-        GEOLOCATION_KEY, HASH_KEY, HW_KEY, LICENSE_KEY, META_DOI_KEY, METADATA_ONLY_KEY, NUM_CHANNELS_KEY, RECORDER_KEY,
-        SAMPLE_RATE_KEY, START_OFFSET_KEY, TRAILING_BYTES_KEY, VERSION_KEY
+        AUTHOR_KEY,
+        COLLECTION_KEY,
+        DATASET_KEY,
+        DATATYPE_KEY,
+        DATA_DOI_KEY,
+        DESCRIPTION_KEY,
+        EXTENSIONS_KEY,
+        GEOLOCATION_KEY,
+        HASH_KEY,
+        HW_KEY,
+        LICENSE_KEY,
+        META_DOI_KEY,
+        METADATA_ONLY_KEY,
+        NUM_CHANNELS_KEY,
+        RECORDER_KEY,
+        SAMPLE_RATE_KEY,
+        START_OFFSET_KEY,
+        TRAILING_BYTES_KEY,
+        VERSION_KEY,
     ]
-    VALID_CAPTURE_KEYS = [DATETIME_KEY, FREQUENCY_KEY, HEADER_BYTES_KEY, GLOBAL_INDEX_KEY, START_INDEX_KEY]
+    VALID_CAPTURE_KEYS = [
+        DATETIME_KEY,
+        FREQUENCY_KEY,
+        HEADER_BYTES_KEY,
+        GLOBAL_INDEX_KEY,
+        START_INDEX_KEY,
+    ]
     VALID_ANNOTATION_KEYS = [
-        COMMENT_KEY, FHI_KEY, FLO_KEY, GENERATOR_KEY, LABEL_KEY, LAT_KEY, LENGTH_INDEX_KEY, LON_KEY, START_INDEX_KEY,
-        UUID_KEY
+        COMMENT_KEY,
+        FHI_KEY,
+        FLO_KEY,
+        GENERATOR_KEY,
+        LABEL_KEY,
+        LAT_KEY,
+        LENGTH_INDEX_KEY,
+        LON_KEY,
+        START_INDEX_KEY,
+        UUID_KEY,
     ]
-    VALID_KEYS = {GLOBAL_KEY: VALID_GLOBAL_KEYS, CAPTURE_KEY: VALID_CAPTURE_KEYS, ANNOTATION_KEY: VALID_ANNOTATION_KEYS}
+    VALID_KEYS = {
+        GLOBAL_KEY: VALID_GLOBAL_KEYS,
+        CAPTURE_KEY: VALID_CAPTURE_KEYS,
+        ANNOTATION_KEY: VALID_ANNOTATION_KEYS,
+    }

     def __init__(self, metadata=None, data_file=None, global_info=None, skip_checksum=False, map_readonly=True):
         """
@@ -204,7 +238,7 @@ def __next__(self):
         raise StopIteration

     def __getitem__(self, sli):
-        mem = self._memmap[sli] # matches behavior of numpy.ndarray.__getitem__()
+        mem = self._memmap[sli]  # matches behavior of numpy.ndarray.__getitem__()

         if self._return_type is None:
             return mem
@@ -333,7 +367,7 @@ def add_capture(self, start_index, metadata=None):
         # sort captures by start_index
         self._metadata[self.CAPTURE_KEY] = sorted(
             capture_list,
-            key=lambda item: item[self.START_INDEX_KEY]
+            key=lambda item: item[self.START_INDEX_KEY],
         )

     def get_captures(self):
@@ -374,13 +408,17 @@ def get_capture_byte_boundarys(self, index):
         compliant or noncompliant SigMF Recordings.
         """
         if index >= len(self.get_captures()):
-            raise SigMFAccessError("Invalid captures index {} (only {} captures in Recording)".format(index, len(self.get_captures())))
+            raise SigMFAccessError(
+                "Invalid captures index {} (only {} captures in Recording)".format(index, len(self.get_captures()))
+            )

         start_byte = 0
         prev_start_sample = 0
         for ii, capture in enumerate(self.get_captures()):
             start_byte += capture.get(self.HEADER_BYTES_KEY, 0)
-            start_byte += (self.get_capture_start(ii) - prev_start_sample) * self.get_sample_size() * self.get_num_channels()
+            start_byte += (
+                (self.get_capture_start(ii) - prev_start_sample) * self.get_sample_size() * self.get_num_channels()
+            )
             prev_start_sample = self.get_capture_start(ii)
             if ii >= index:
                 break
@@ -389,7 +427,11 @@ def get_capture_byte_boundarys(self, index):
         if index == len(self.get_captures()) - 1:  # last captures...data is the rest of the file
             end_byte = path.getsize(self.data_file) - self.get_global_field(self.TRAILING_BYTES_KEY, 0)
         else:
-            end_byte += (self.get_capture_start(index+1) - self.get_capture_start(index)) * self.get_sample_size() * self.get_num_channels()
+            end_byte += (
+                (self.get_capture_start(index + 1) - self.get_capture_start(index))
+                * self.get_sample_size()
+                * self.get_num_channels()
+            )
         return (start_byte, end_byte)

     def add_annotation(self, start_index, length=None, metadata=None):
@@ -408,7 +450,7 @@ def add_annotation(self, start_index, length=None, metadata=None):
         # sort annotations by start_index
         self._metadata[self.ANNOTATION_KEY] = sorted(
             self._metadata[self.ANNOTATION_KEY],
-            key=lambda item: item[self.START_INDEX_KEY]
+            key=lambda item: item[self.START_INDEX_KEY],
         )

     def get_annotations(self, index=None):
@@ -465,13 +507,18 @@ def _count_samples(self):
         header_bytes = sum([c.get(self.HEADER_BYTES_KEY, 0) for c in self.get_captures()])
         file_size = path.getsize(self.data_file) if self.data_size_bytes is None else self.data_size_bytes
         file_data_size = file_size - self.get_global_field(self.TRAILING_BYTES_KEY, 0) - header_bytes  # bytes
-        sample_size = self.get_sample_size() # size of a sample in bytes
+        sample_size = self.get_sample_size()  # size of a sample in bytes
         num_channels = self.get_num_channels()
         sample_count = file_data_size // sample_size // num_channels
         if file_data_size % (sample_size * num_channels) != 0:
-            warnings.warn(f"File `{self.data_file}` does not contain an integer number of samples across channels. It may be invalid data.")
+            warnings.warn(
+                f"File `{self.data_file}` does not contain an integer number of samples across channels. "
+                "It may be invalid data."
+            )
         if self._get_sample_count_from_annotations() > sample_count:
-            warnings.warn(f"File `{self.data_file}` ends before the final annotation in the corresponding SigMF metadata.")
+            warnings.warn(
+                f"File `{self.data_file}` ends before the final annotation in the corresponding SigMF metadata."
+            )
         self.sample_count = sample_count
         return sample_count
@@ -502,17 +549,27 @@ def calculate_hash(self):
         """
         old_hash = self.get_global_field(self.HASH_KEY)
         if self.data_file is not None:
-            new_hash = sigmf_hash.calculate_sha512(self.data_file, offset=self.data_offset, size=self.data_size_bytes)
+            new_hash = sigmf_hash.calculate_sha512(
+                self.data_file,
+                offset=self.data_offset,
+                size=self.data_size_bytes,
+            )
         else:
-            new_hash = sigmf_hash.calculate_sha512(fileobj=self.data_buffer, offset=self.data_offset, size=self.data_size_bytes)
+            new_hash = sigmf_hash.calculate_sha512(
+                fileobj=self.data_buffer,
+                offset=self.data_offset,
+                size=self.data_size_bytes,
+            )
         if old_hash is not None:
             if old_hash != new_hash:
                 raise SigMFFileError("Calculated file hash does not match associated metadata.")

         self.set_global_field(self.HASH_KEY, new_hash)
         return new_hash

-    def set_data_file(self, data_file=None, data_buffer=None, skip_checksum=False, offset=0, size_bytes=None, map_readonly=True):
+    def set_data_file(
+        self, data_file=None, data_buffer=None, skip_checksum=False, offset=0, size_bytes=None, map_readonly=True
+    ):
         """
         Set the datafile path, then recalculate sample count. If not skipped,
         update the hash and return the hash string.
@@ -727,7 +784,13 @@ class SigMFCollection(SigMFMetafile):
     STREAMS_KEY = "core:streams"
     COLLECTION_KEY = "collection"
     VALID_COLLECTION_KEYS = [
-        AUTHOR_KEY, COLLECTION_DOI_KEY, DESCRIPTION_KEY, EXTENSIONS_KEY, LICENSE_KEY, STREAMS_KEY, VERSION_KEY
+        AUTHOR_KEY,
+        COLLECTION_DOI_KEY,
+        DESCRIPTION_KEY,
+        EXTENSIONS_KEY,
+        LICENSE_KEY,
+        STREAMS_KEY,
+        VERSION_KEY,
     ]
     VALID_KEYS = {COLLECTION_KEY: VALID_COLLECTION_KEYS}
@@ -781,7 +844,9 @@ def verify_stream_hashes(self):
             if path.isfile(metafile_name):
                 new_hash = sigmf_hash.calculate_sha512(filename=metafile_name)
                 if old_hash != new_hash:
-                    raise SigMFFileError(f"Calculated file hash for {metafile_name} does not match collection metadata.")
+                    raise SigMFFileError(
+                        f"Calculated file hash for {metafile_name} does not match collection metadata."
+                    )

     def set_streams(self, metafiles):
         """
6 changes: 3 additions & 3 deletions sigmf/utils.py
@@ -37,12 +37,12 @@ def parse_iso8601_datetime(datestr: str) -> datetime:
     if match:
         md = match.groupdict()
         length = min(7, len(md["frac"]))
-        datestr = ''.join([md["dt"], md["frac"][:length], "Z"])
+        datestr = "".join([md["dt"], md["frac"][:length], "Z"])

     try:
-        timestamp = datetime.strptime(datestr, '%Y-%m-%dT%H:%M:%S.%fZ')
+        timestamp = datetime.strptime(datestr, "%Y-%m-%dT%H:%M:%S.%fZ")
     except ValueError:
-        timestamp = datetime.strptime(datestr, '%Y-%m-%dT%H:%M:%SZ')
+        timestamp = datetime.strptime(datestr, "%Y-%m-%dT%H:%M:%SZ")
     return timestamp

2 changes: 1 addition & 1 deletion tests/test_archivereader.py
@@ -84,7 +84,7 @@ def test_access_data_without_untar(self):


 def test_archiveread_data_file_unchanged(test_sigmffile):
-    with tempfile.NamedTemporaryFile(suffix='.sigmf') as temp:
+    with tempfile.NamedTemporaryFile(suffix=".sigmf") as temp:
         input_samples = test_sigmffile.read_samples()
         test_sigmffile.archive(temp.name)

32 changes: 23 additions & 9 deletions tests/test_utils.py
Review comment (Collaborator): Let's not change this file. This is one of those edge cases where black makes things much worse.

Review comment (Collaborator): I think you can insert # fmt: skip to skip an individual line for the formatter, or use # fmt: off and # fmt: on to disable formatting for regions.
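
For illustration, a minimal sketch of that suggestion applied to this table (hypothetical, with the parametrize cases abbreviated; not code from this PR):

    # black leaves the region between the toggles exactly as written;
    # a single protected line could use a trailing "# fmt: skip" instead.
    import pytest
    from datetime import datetime

    from sigmf import utils

    # fmt: off
    @pytest.mark.parametrize("ts, expected", [
        ("1955-07-04T05:15:00Z", datetime(1955, 7, 4, 5, 15, 0)),
        ("3957-09-06T07:15:12.345Z", datetime(3957, 9, 6, 7, 15, 12, 345000)),
    ])
    def test_parse_simple_iso8601(ts, expected):
        assert utils.parse_iso8601_datetime(ts) == expected
    # fmt: on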

@@ -13,15 +13,29 @@
 from sigmf import utils


-@pytest.mark.parametrize("ts, expected", [
-    ("1955-07-04T05:15:00Z", datetime(year=1955, month=7, day=4, hour=5, minute=15, second=00, microsecond=0)),
-    ("2956-08-05T06:15:12Z", datetime(year=2956, month=8, day=5, hour=6, minute=15, second=12, microsecond=0)),
-    ("3957-09-06T07:15:12.345Z", datetime(year=3957, month=9, day=6, hour=7, minute=15, second=12, microsecond=345000)),
-    ("4958-10-07T08:15:12.0345Z", datetime(year=4958, month=10, day=7, hour=8, minute=15, second=12, microsecond=34500)),
-    ("5959-11-08T09:15:12.000000Z", datetime(year=5959, month=11, day=8, hour=9, minute=15, second=12, microsecond=0)),
-    ("6960-12-09T10:15:12.123456789123Z", datetime(year=6960, month=12, day=9, hour=10, minute=15, second=12, microsecond=123456)),
-
-])
+@pytest.mark.parametrize(
+    "ts, expected",
+    [
+        ("1955-07-04T05:15:00Z", datetime(year=1955, month=7, day=4, hour=5, minute=15, second=00, microsecond=0)),
+        ("2956-08-05T06:15:12Z", datetime(year=2956, month=8, day=5, hour=6, minute=15, second=12, microsecond=0)),
+        (
+            "3957-09-06T07:15:12.345Z",
+            datetime(year=3957, month=9, day=6, hour=7, minute=15, second=12, microsecond=345000),
+        ),
+        (
+            "4958-10-07T08:15:12.0345Z",
+            datetime(year=4958, month=10, day=7, hour=8, minute=15, second=12, microsecond=34500),
+        ),
+        (
+            "5959-11-08T09:15:12.000000Z",
+            datetime(year=5959, month=11, day=8, hour=9, minute=15, second=12, microsecond=0),
+        ),
+        (
+            "6960-12-09T10:15:12.123456789123Z",
+            datetime(year=6960, month=12, day=9, hour=10, minute=15, second=12, microsecond=123456),
+        ),
+    ],
+)
 def test_parse_simple_iso8601(ts, expected):
     dt = utils.parse_iso8601_datetime(ts)
     assert dt == expected