From 29feb3b27988b8af7a9cbd71a91d95de8b41e89b Mon Sep 17 00:00:00 2001 From: scott Date: Wed, 24 Jan 2024 14:22:19 -0500 Subject: [PATCH 1/2] add more linters to ruff --- pyproject.toml | 44 +++++++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0ef8e2e..73a674b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,26 +48,44 @@ version_scheme = "no-guess-dev" # Will not guess the next version [tool.ruff] src = ["src"] +unsafe-fixes = true +select = [ + "A", # flake8-builtins + "ARG", # flake8-unused-arguments + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "D", # pydocstyle + "E", # pycodestyle (errors) + "W", # pycodestyle (warnings) + # "EM", # flake8-errmsg + "EXE", # flake8-executable + "F", # Pyflakes + "I", # isort + "ISC", # flake8-implicit-str-concat + "N", # pep8-naming + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # Pylint + # "PT", # flake8-pytest-style + "PTH", # flake8-use-pathlib + "PYI", # flake8-pyi + "RUF", # Ruff-specific rules +] + ignore = [ "D100", # Missing docstring in public module "D104", # Missing docstring in public package "D105", # Missing docstring in magic method "D203", # 1 blank line required before class docstring - "D212", # Multi-line docstring summary should start at the first line + "D213", # Multi-line docstring summary should start at the second line + "N803", # Argument name should be lowercase + "N806", # Variable _ in function should be lowercase + "PIE796", # Non-unique values are redundant and likely a mistake. "PLR", # Pylint Refactor + "PTH123", # `open()` should be replaced by `Path.open()` + "PTH207", # "Replace `glob` with `Path.glob` or `Path.rglob` ] -[tool.ruff.lint] -# Enable the isort rules. 
-extend-select = ["I"] - -[tool.black] -target-version = ["py38", "py39", "py310", "py311"] -preview = true - -[tool.isort] -profile = "black" -known_first_party = ["dolphin"] [tool.mypy] python_version = "3.10" @@ -76,7 +94,7 @@ plugins = ["pydantic.mypy"] [tool.ruff.per-file-ignores] "**/__init__.py" = ["F401"] -"test/**" = ["D"] +"tests/**" = ["D"] [tool.pytest.ini_options] doctest_optionflags = "NORMALIZE_WHITESPACE NUMBER" From f8f58672077eb62608da2d812fb5802605389f41 Mon Sep 17 00:00:00 2001 From: scott Date: Wed, 24 Jan 2024 14:33:29 -0500 Subject: [PATCH 2/2] add auto fixes, relax some --- pyproject.toml | 11 +--- .../release/generate_product_docx_table.py | 14 ++--- scripts/release/list_packages.py | 4 -- scripts/release/setup_delivery_config.py | 7 +-- scripts/run_repeated_nrt.py | 56 ++++++++--------- src/disp_s1/cli/run.py | 1 - src/disp_s1/create.py | 5 +- src/disp_s1/ionosphere.py | 4 +- src/disp_s1/main.py | 6 +- src/disp_s1/pge_runconfig.py | 20 +++---- src/disp_s1/plotting.py | 4 +- src/disp_s1/product.py | 60 ++++++++++--------- src/disp_s1/product_info.py | 10 ++-- src/disp_s1/utils.py | 5 +- src/disp_s1/validate.py | 4 -- tests/make_netcdf.py | 15 +++-- 16 files changed, 108 insertions(+), 118 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 73a674b..9d8f02b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,24 +52,17 @@ unsafe-fixes = true select = [ "A", # flake8-builtins "ARG", # flake8-unused-arguments - "B", # flake8-bugbear "C4", # flake8-comprehensions "D", # pydocstyle "E", # pycodestyle (errors) "W", # pycodestyle (warnings) - # "EM", # flake8-errmsg "EXE", # flake8-executable "F", # Pyflakes "I", # isort "ISC", # flake8-implicit-str-concat "N", # pep8-naming - "PGH", # pygrep-hooks - "PIE", # flake8-pie - "PL", # Pylint - # "PT", # flake8-pytest-style "PTH", # flake8-use-pathlib "PYI", # flake8-pyi - "RUF", # Ruff-specific rules ] ignore = [ @@ -93,8 +86,8 @@ ignore_missing_imports = true plugins = ["pydantic.mypy"] [tool.ruff.per-file-ignores] -"**/__init__.py" = ["F401"] -"tests/**" = ["D"] +"**/__init__.py" = ["F403"] +"tests/**" = ["D", "N", "PTH"] [tool.pytest.ini_options] doctest_optionflags = "NORMALIZE_WHITESPACE NUMBER" diff --git a/scripts/release/generate_product_docx_table.py b/scripts/release/generate_product_docx_table.py index 54f8c58..8b8f055 100755 --- a/scripts/release/generate_product_docx_table.py +++ b/scripts/release/generate_product_docx_table.py @@ -77,13 +77,13 @@ def append_dset_to_table(name, item): description = item.attrs.get("long_name", "") units = item.attrs.get("units", "") table_data.append( - dict( - Name=name, - Type=data_type, - Shape=shape, - Units=units, - Description=description, - ) + { + "Name": name, + "Type": data_type, + "Shape": shape, + "Units": units, + "Description": description, + } ) with h5py.File(hdf5_path, "r") as hf: diff --git a/scripts/release/list_packages.py b/scripts/release/list_packages.py index 4b76a4f..e10879b 100755 --- a/scripts/release/list_packages.py +++ b/scripts/release/list_packages.py @@ -33,14 +33,10 @@ class Package: class CommandNotFoundError(Exception): """Raised when a required Unix shell command was not found.""" - pass - class YumListIsAnnoyingError(Exception): """Raised when 'yum list' does something annoying.""" - pass - def check_command(cmd: str) -> bool: """Check if a Unix shell command is available.""" diff --git a/scripts/release/setup_delivery_config.py b/scripts/release/setup_delivery_config.py index 6539c81..e0add2f 100644 --- 
a/scripts/release/setup_delivery_config.py +++ b/scripts/release/setup_delivery_config.py @@ -8,7 +8,7 @@ # see `ionosphere.download_ionex_for_slcs`. # The troposphere file download is missing. Dummy files were created. # for d in `ls input_slcs/t042_088905_iw1* | awk -F'_' '{print $5}' | cut -d'.' -f1`; do -# touch dynamic_ancillary_files/troposphere_files/ERA5_N30_N40_W120_W110_${d}_14.grb; +# touch dynamic_ancillary_files/troposphere_files/ERA5_N30_N40_W120_W110_${d}_14.grb; # done @@ -30,7 +30,6 @@ def setup_delivery(cfg_dir: Path, mode: ProcessingMode): " --amplitude-mean-files ./dynamic_ancillary_files/ps_files/*mean*" " --amplitude-dispersion-files ./dynamic_ancillary_files/ps_files/*dispersion*" # TODO # seasonal coherence averages - # "--seasonal-coherence-files dynamic_ancillary_files/seasonal_coherence_files/* " # Troposphere files: " --troposphere-files ./dynamic_ancillary_files/troposphere_files/*" # Ionosphere files: @@ -49,7 +48,7 @@ def setup_delivery(cfg_dir: Path, mode: ProcessingMode): f" -o {outfile}" ) print(cmd) - subprocess.run(cmd, shell=True) + subprocess.run(cmd, shell=True, check=False) return outfile @@ -77,7 +76,7 @@ def setup_delivery(cfg_dir: Path, mode: ProcessingMode): ) cmd = f"python {convert_config} {dolphin_cfg_file} {arg_string}" print(cmd) - subprocess.run(cmd, shell=True) + subprocess.run(cmd, shell=True, check=False) # Remove the `dolphin` yamls for f in cfg_dir.glob("dolphin_config*.yaml"): f.unlink() diff --git a/scripts/run_repeated_nrt.py b/scripts/run_repeated_nrt.py index 80c5b76..eda6abd 100755 --- a/scripts/run_repeated_nrt.py +++ b/scripts/run_repeated_nrt.py @@ -31,57 +31,57 @@ def _create_cfg( amplitude_mean_files: Sequence[Filename] = [], amplitude_dispersion_files: Sequence[Filename] = [], strides: Mapping[str, int] = {"x": 6, "y": 3}, - work_dir: Path = Path("."), + work_dir: Path = Path(), n_parallel_bursts: int = 1, ): # strides = {"x": 1, "y": 1} interferogram_network: dict[str, Any] if first_ministack: - interferogram_network = dict( - network_type=InterferogramNetworkType.SINGLE_REFERENCE - ) + interferogram_network = { + "network_type": InterferogramNetworkType.SINGLE_REFERENCE + } else: - interferogram_network = dict( - network_type=InterferogramNetworkType.MANUAL_INDEX, - indexes=[(0, -1)], - ) + interferogram_network = { + "network_type": InterferogramNetworkType.MANUAL_INDEX, + "indexes": [(0, -1)], + } cfg = DisplacementWorkflow( # Things that change with each workflow run cslc_file_list=slc_files, - input_options=dict(subdataset=OPERA_DATASET_NAME), + input_options={"subdataset": OPERA_DATASET_NAME}, interferogram_network=interferogram_network, amplitude_mean_files=amplitude_mean_files, amplitude_dispersion_files=amplitude_dispersion_files, # Configurable from CLI inputs: - output_options=dict( - strides=strides, - ), - phase_linking=dict( - ministack_size=1000, # for single update, process in one ministack - half_window={"x": half_window_size[0], "y": half_window_size[1]}, - shp_method=shp_method, - ), + output_options={ + "strides": strides, + }, + phase_linking={ + "ministack_size": 1000, # for single update, process in one ministack + "half_window": {"x": half_window_size[0], "y": half_window_size[1]}, + "shp_method": shp_method, + }, work_directory=work_dir, - worker_settings=dict( + worker_settings={ # block_size_gb=block_size_gb, - n_parallel_bursts=n_parallel_bursts, - n_workers=4, - threads_per_worker=8, - ), + "n_parallel_bursts": n_parallel_bursts, + "n_workers": 4, + "threads_per_worker": 8, + }, # 
ps_options=dict( # amp_dispersion_threshold=amp_dispersion_threshold, # ), # log_file=log_file, # ) # Definite hard coded things - unwrap_options=dict( - unwrap_method="snaphu", - run_unwrap=run_unwrap, - ntiles=(2, 2), - downsample_factor=(3, 3), + unwrap_options={ + "unwrap_method": "snaphu", + "run_unwrap": run_unwrap, + "ntiles": (2, 2), + "downsample_factor": (3, 3), # CHANGEME: or else run in background somehow? - ), + }, save_compressed_slc=True, # always save, and only sometimes will we grab it # workflow_name=workflow_name, ) diff --git a/src/disp_s1/cli/run.py b/src/disp_s1/cli/run.py index f0455e9..3f715b2 100644 --- a/src/disp_s1/cli/run.py +++ b/src/disp_s1/cli/run.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python import click __all__ = ["run"] diff --git a/src/disp_s1/create.py b/src/disp_s1/create.py index 4d4e07d..b1e61c1 100644 --- a/src/disp_s1/create.py +++ b/src/disp_s1/create.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python import logging from pathlib import Path from typing import Any @@ -30,9 +29,9 @@ def get_params( Path(f"{process_dir}/unwrapped/").glob(f"{pair}.unw.tif") ) logger.info(param_dict["unw_filename"]) - except StopIteration: + except StopIteration as e: logger.error("Check if the pair %s exists", pair) - raise FileNotFoundError(f"Pair {pair} not found") + raise FileNotFoundError(f"Pair {pair} not found") from e param_dict["conncomp_filename"] = next( Path(f"{process_dir}/unwrapped/").glob(f"{pair}.unw.conncomp.tif") ) diff --git a/src/disp_s1/ionosphere.py b/src/disp_s1/ionosphere.py index 728a89d..46e41ce 100644 --- a/src/disp_s1/ionosphere.py +++ b/src/disp_s1/ionosphere.py @@ -36,7 +36,7 @@ def download_ionex_for_slcs( logger.info(f"Found {len(date_to_file_list)} dates in the input files.") output_files = [] - for input_date_tuple, file_list in date_to_file_list.items(): + for input_date_tuple, _file_list in date_to_file_list.items(): input_date = input_date_tuple[0] logger.info("Downloading for %s", input_date) f = download_ionex_for_date(input_date, dest_dir=dest_dir, verbose=verbose) @@ -78,7 +78,7 @@ def download_ionex_for_date( wget_cmd.append("--quiet") logger.info('Running command: "%s"', " ".join(wget_cmd)) - subprocess.run(wget_cmd, cwd=dest_dir) + subprocess.run(wget_cmd, cwd=dest_dir, check=False) return dest_file diff --git a/src/disp_s1/main.py b/src/disp_s1/main.py index 69efc30..4849ce6 100644 --- a/src/disp_s1/main.py +++ b/src/disp_s1/main.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python from __future__ import annotations import multiprocessing as mp @@ -31,8 +30,7 @@ def run( Parameters ---------- cfg : DisplacementWorkflow - [`DisplacementWorkflow`][dolphin.workflows.config.DisplacementWorkflow] object for controlling the - workflow. + `DisplacementWorkflow` object for controlling the workflow. debug : bool, optional Enable debug logging, by default False. 
pge_runconfig : RunConfig, optional @@ -92,7 +90,7 @@ def run( else: # grab the only key (either a burst, or "") and use that - b = list(grouped_slc_files.keys())[0] + b = next(iter(grouped_slc_files.keys())) wrapped_phase_cfgs = [(b, cfg)] ifg_file_list: list[Path] = [] diff --git a/src/disp_s1/pge_runconfig.py b/src/disp_s1/pge_runconfig.py index f4b0be4..79a5e12 100644 --- a/src/disp_s1/pge_runconfig.py +++ b/src/disp_s1/pge_runconfig.py @@ -42,7 +42,7 @@ class InputFileGroup(YamlModel): class DynamicAncillaryFileGroup(YamlModel): """A group of dynamic ancillary files.""" - algorithm_parameters_file: Path = Field( # type: ignore + algorithm_parameters_file: Path = Field( default=..., description="Path to file containing SAS algorithm parameters.", ) @@ -128,7 +128,7 @@ class PrimaryExecutable(YamlModel): class ProductPathGroup(YamlModel): """Group describing the product paths.""" - product_path: Path = Field( # type: ignore + product_path: Path = Field( default=..., description="Directory where PGE will place results", ) @@ -159,7 +159,7 @@ class ProductPathGroup(YamlModel): class AlgorithmParameters(YamlModel): - """Class containing all the other [`DisplacementWorkflow`][dolphin.workflows.config] classes.""" + """Class containing all the other `DisplacementWorkflow` classes.""" # Options for each step in the workflow ps_options: PsOptions = Field(default_factory=PsOptions) @@ -217,8 +217,8 @@ def model_construct(cls, **kwargs): ) def to_workflow(self): - """Convert to a [`DisplacementWorkflow`][dolphin.workflows.config.DisplacementWorkflow] object.""" - # We need to go to/from the PGE format to our internal DisplacementWorkflow object: + """Convert to a `DisplacementWorkflow` object.""" + # We need to go to/from the PGE format to dolphin's DisplacementWorkflow: # Note that the top two levels of nesting can be accomplished by wrapping # the normal model export in a dict. # @@ -240,7 +240,7 @@ def to_workflow(self): self.dynamic_ancillary_file_group.algorithm_parameters_file ) param_dict = algorithm_parameters.model_dump() - input_options = dict(subdataset=param_dict.pop("subdataset")) + input_options = {"subdataset": param_dict.pop("subdataset")} # Convert the frame_id into an output bounding box frame_to_burst_file = self.static_ancillary_file_group.frame_to_burst_json @@ -251,7 +251,7 @@ def to_workflow(self): param_dict["output_options"]["bounds"] = bounds param_dict["output_options"]["bounds_epsg"] = bounds_epsg - # This get's unpacked to load the rest of the parameters for the DisplacementWorkflow + # unpacked to load the rest of the parameters for the DisplacementWorkflow return DisplacementWorkflow( cslc_file_list=cslc_file_list, input_options=input_options, @@ -277,13 +277,13 @@ def from_workflow( save_compressed_slc: bool = False, output_directory: Optional[Path] = None, ): - """Convert from a [`DisplacementWorkflow`][dolphin.workflows.config.DisplacementWorkflow] object. + """Convert from a `DisplacementWorkflow` object. This is the inverse of the to_workflow method, although there are more fields in the PGE version, so it's not a 1-1 mapping. The arguments, like `frame_id` or `algorithm_parameters_file`, are not in the - [`DisplacementWorkflow`][dolphin.workflows.config.DisplacementWorkflow] object, so we need to pass + `DisplacementWorkflow` object, so we need to pass those in as arguments. 
This is can be used as preliminary setup to further edit the fields, or as a @@ -297,7 +297,7 @@ def from_workflow( algo_keys = set(AlgorithmParameters.model_fields.keys()) alg_param_dict = workflow.model_dump(include=algo_keys) AlgorithmParameters(**alg_param_dict).to_yaml(algorithm_parameters_file) - # This gets unpacked to load the rest of the parameters for the DisplacementWorkflow + # unpacked to load the rest of the parameters for the DisplacementWorkflow return cls( input_file_group=InputFileGroup( diff --git a/src/disp_s1/plotting.py b/src/disp_s1/plotting.py index be998d1..a02edde 100644 --- a/src/disp_s1/plotting.py +++ b/src/disp_s1/plotting.py @@ -81,14 +81,14 @@ def plot_product( class HDF5Explorer: """Class which maps an HDF5 file and allows tab-completion to explore datasets.""" - def __init__(self, hdf5_filepath: str, load_less_than: float = 1e3): + def __init__(self, hdf5_filepath: str, load_less_than: float = 1e3): # noqa: D107 self.hdf5_filepath = hdf5_filepath self._hf = h5py.File(hdf5_filepath, "r") self._root_group = _HDF5GroupExplorer( self._hf["/"], load_less_than=load_less_than ) - def close(self): + def close(self): # noqa: D102 self._hf.close() def __getattr__(self, name): diff --git a/src/disp_s1/product.py b/src/disp_s1/product.py index ac3b1d4..4ccd242 100644 --- a/src/disp_s1/product.py +++ b/src/disp_s1/product.py @@ -32,14 +32,14 @@ CORRECTIONS_GROUP_NAME = "corrections" IDENTIFICATION_GROUP_NAME = "identification" METADATA_GROUP_NAME = "metadata" -GLOBAL_ATTRS = dict( - Conventions="CF-1.8", - contact="operaops@jpl.nasa.gov", - institution="NASA JPL", - mission_name="OPERA", - reference_document="TBD", - title="OPERA L3_DISP-S1 Product", -) +GLOBAL_ATTRS = { + "Conventions": "CF-1.8", + "contact": "operaops@jpl.nasa.gov", + "institution": "NASA JPL", + "mission_name": "OPERA", + "reference_document": "TBD", + "title": "OPERA L3_DISP-S1 Product", +} # Convert chunks to a tuple or h5py errors HDF5_OPTS = io.DEFAULT_HDF5_OPTIONS.copy() @@ -66,7 +66,7 @@ def create_output_product( ps_mask_filename: Filename, pge_runconfig: RunConfig, cslc_files: Sequence[Filename], - corrections: dict[str, ArrayLike] = {}, + corrections: Optional[dict[str, ArrayLike]] = None, ): """Create the OPERA output product in NetCDF format. @@ -93,6 +93,8 @@ def create_output_product( Used to add extra metadata to the output file. 
""" # Read the Geotiff file and its metadata + if corrections is None: + corrections = {} crs = io.get_raster_crs(unw_filename) gt = io.get_raster_gt(unw_filename) unw_arr_ma = io.load_gdal(unw_filename, masked=True) @@ -145,9 +147,7 @@ def create_output_product( ifg_corr_arr, io.load_gdal(ps_mask_filename), ] - disp_products = [ - (nfo, data) for nfo, data in zip(disp_products_info, disp_data) - ] + disp_products = list(zip(disp_products_info, disp_data)) for nfo, data in disp_products: _create_geo_dataset( group=f, @@ -190,7 +190,7 @@ def _create_corrections_group( start_time: datetime.datetime, ) -> None: with h5netcdf.File(output_name, "a") as f: - # Create the group holding phase corrections that were used on the unwrapped phase + # Create the group holding phase corrections used on the unwrapped phase corrections_group = f.create_group(CORRECTIONS_GROUP_NAME) corrections_group.attrs[ "description" @@ -213,7 +213,7 @@ def _create_corrections_group( data=troposphere, description="Tropospheric phase delay used to correct the unwrapped phase", fillvalue=np.nan, - attrs=dict(units="radians"), + attrs={"units": "radians"}, ) ionosphere = corrections.get("ionosphere", empty_arr) _create_geo_dataset( @@ -222,7 +222,7 @@ def _create_corrections_group( data=ionosphere, description="Ionospheric phase delay used to correct the unwrapped phase", fillvalue=np.nan, - attrs=dict(units="radians"), + attrs={"units": "radians"}, ) solid_earth = corrections.get("solid_earth", empty_arr) _create_geo_dataset( @@ -231,7 +231,7 @@ def _create_corrections_group( data=solid_earth, description="Solid Earth tide used to correct the unwrapped phase", fillvalue=np.nan, - attrs=dict(units="radians"), + attrs={"units": "radians"}, ) plate_motion = corrections.get("plate_motion", empty_arr) _create_geo_dataset( @@ -240,7 +240,7 @@ def _create_corrections_group( data=plate_motion, description="Phase ramp caused by tectonic plate motion", fillvalue=np.nan, - attrs=dict(units="radians"), + attrs={"units": "radians"}, ) # Make a scalar dataset for the reference point reference_point = corrections.get("reference_point", 0.0) @@ -256,8 +256,14 @@ def _create_corrections_group( ), dtype=int, # Note: the dataset contains attributes with lists, since the reference - # could have come from multiple points (e.g. some boxcar average of an area). - attrs=dict(units="unitless", rows=[], cols=[], latitudes=[], longitudes=[]), + # could have come from multiple points (e.g. boxcar average of an area). 
+ attrs={ + "units": "unitless", + "rows": [], + "cols": [], + "latitudes": [], + "longitudes": [], + }, ) @@ -316,7 +322,7 @@ def _create_identification_group( data=get_union_polygon(cslc_files).wkt, fillvalue=None, description="WKT representation of bounding polygon of the image", - attrs=dict(units="degrees"), + attrs={"units": "degrees"}, ) wavelength, attrs = _parse_cslc_product.get_radar_wavelength(cslc_files[-1]) @@ -483,7 +489,7 @@ def _create_yx_dsets( y, x = _create_yx_arrays(gt, shape) if not group.dimensions: - dims = dict(y=y.size, x=x.size) + dims = {"y": y.size, "x": x.size} if include_time: dims["time"] = 1 group.dimensions = dims @@ -539,13 +545,13 @@ def _create_grid_mapping(group, crs: pyproj.CRS, gt: list[float]) -> h5netcdf.Va # Also add the GeoTransform gt_string = " ".join([str(x) for x in gt]) dset.attrs.update( - dict( - GeoTransform=gt_string, - units="unitless", - long_name=( + { + "GeoTransform": gt_string, + "units": "unitless", + "long_name": ( "Dummy variable containing geo-referencing metadata in attributes" ), - ) + } ) return dset @@ -577,7 +583,7 @@ def form_name(filename: Path, burst: str): # Input metadata is stored within the GDAL "DOLPHIN" domain metadata_dict = io.get_raster_metadata(comp_slc_file, "DOLPHIN") - attrs = dict(units="unitless") + attrs = {"units": "unitless"} attrs.update(metadata_dict) logger.info(f"Writing {outname}") diff --git a/src/disp_s1/product_info.py b/src/disp_s1/product_info.py index b395696..d7237c4 100644 --- a/src/disp_s1/product_info.py +++ b/src/disp_s1/product_info.py @@ -24,7 +24,7 @@ def unwrapped_phase(cls): name="unwrapped_phase", description="Unwrapped phase", fillvalue=np.nan, - attrs=dict(units="radians"), + attrs={"units": "radians"}, ) @classmethod @@ -34,7 +34,7 @@ def connected_component_labels(cls): name="connected_component_labels", description="Connected component labels of the unwrapped phase", fillvalue=0, - attrs=dict(units="unitless"), + attrs={"units": "unitless"}, ) @classmethod @@ -44,7 +44,7 @@ def temporal_coherence(cls): name="temporal_coherence", description="Temporal coherence of phase inversion", fillvalue=np.nan, - attrs=dict(units="unitless"), + attrs={"units": "unitless"}, ) @classmethod @@ -57,7 +57,7 @@ def interferometric_correlation(cls): " multilooked interferogram." ), fillvalue=np.nan, - attrs=dict(units="unitless"), + attrs={"units": "unitless"}, ) @classmethod @@ -70,7 +70,7 @@ def persistent_scatterer_mask(cls): " output grid." ), fillvalue=255, - attrs=dict(units="unitless"), + attrs={"units": "unitless"}, ) diff --git a/src/disp_s1/utils.py b/src/disp_s1/utils.py index b90c667..1bd2c2e 100644 --- a/src/disp_s1/utils.py +++ b/src/disp_s1/utils.py @@ -27,8 +27,8 @@ def read_zipped_json(filename: Filename): """ if Path(filename).suffix == ".zip": with zipfile.ZipFile(filename) as zf: - bytes = zf.read(str(Path(filename).name).replace(".zip", "")) - return json.loads(bytes.decode()) + b = zf.read(str(Path(filename).name).replace(".zip", "")) + return json.loads(b.decode()) else: with open(filename) as f: return json.load(f) @@ -46,6 +46,7 @@ def get_frame_json( json_file : Filename, optional The path to the JSON file containing the frame-to-burst mapping. 
If `None`, uses the zip file contained in `disp_s1/data` + Returns ------- dict diff --git a/src/disp_s1/validate.py b/src/disp_s1/validate.py index 8e40999..762a2b6 100755 --- a/src/disp_s1/validate.py +++ b/src/disp_s1/validate.py @@ -17,14 +17,10 @@ class ValidationError(Exception): """Raised when a product fails a validation check.""" - pass - class ComparisonError(ValidationError): """Exception raised when two datasets do not match.""" - pass - def compare_groups( golden_group: h5py.Group, diff --git a/tests/make_netcdf.py b/tests/make_netcdf.py index 2499e5e..7bab536 100644 --- a/tests/make_netcdf.py +++ b/tests/make_netcdf.py @@ -1,8 +1,8 @@ -#!/usr/bin/env python from __future__ import annotations import argparse import os +from pathlib import Path from typing import Union import h5py @@ -34,12 +34,13 @@ def create_test_nc( ): if isinstance(subdir, list): # Create groups in the same file to make multiple SubDatasets - return [ + [ create_test_nc( outfile, epsg, s, data, data_ds_name, shape, dtype, xoff, yoff, "a" ) for s in subdir ] + return if data is None: data = np.ones(shape, dtype=dtype) @@ -87,6 +88,7 @@ def create_test_nc( # CF 1.7+ requires this attribute to be named "crs_wkt" # spatial_ref is old GDAL way. Using that for testing only. srs_ds.attrs[srs_name] = crs.to_wkt() + srs_ds.attrs["crs_wkt"] = crs.to_wkt() srs_ds.attrs.update(crs.to_cf()) @@ -133,11 +135,12 @@ def get_cli_args(): args = get_cli_args() # Check output extension is .nc (Only for verification with GDAL) - if os.path.splitext(args.outfile)[1] != ".nc": - raise Exception( - "This script uses GDAL's netcdf4 driver for verification and expects output" - " file to have an extension of .nc" + if Path(args.outfile).suffix != ".nc": + msg = ( + "This script uses GDAL's netcdf4 driver for verification and expects " + "output file to have an extension of .nc" ) + raise ValueError(msg) create_test_nc(args.outfile, epsg=args.epsg, subdir=args.subdir) gdalinfo = gdal.Info(args.outfile, format="json")
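Reviewer note (not part of the patch): most of [PATCH 2/2] appears to be the mechanical result of running Ruff's autofixer against the configuration added in [PATCH 1/2] — for example the dict()-call-to-literal rewrites (flake8-comprehensions, C408) and the os.path-to-pathlib conversions (flake8-use-pathlib, PTH). A minimal sketch of how to reproduce the fixes locally, assuming a Ruff release recent enough to honor the `unsafe-fixes = true` setting added to pyproject.toml:

    ruff check --fix .   # lint with the selected rules and apply the available autofixes
    ruff check .         # confirm only non-autofixable findings (if any) remain

The Ruff version is not pinned anywhere in this diff, so treat these commands as illustrative rather than as the exact invocation used.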